diff --git a/.circleci/config.yml b/.circleci/config.yml index 62c29203dac1..36f0774131a5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.13 + - image: cimg/python:3.12.12 working_directory: ~/repo @@ -53,7 +53,7 @@ jobs: - run: name: build NumPy command: | - python3.11 -m venv venv + python3.12 -m venv venv . venv/bin/activate pip install --progress-bar=off -r requirements/test_requirements.txt \ -r requirements/build_requirements.txt \ diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b237d52424ac..bb88ed20b8ba 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -64,10 +64,12 @@ body: - type: textarea attributes: - label: "Context for the issue:" + label: "How does this issue affect you or how did you find it:" description: | - Please explain how this issue affects your work or why it should be prioritized. + Please explain how this issue concretely affects you or others. + Especially if it does not impact you how did you find it? + (If an issue has no concrete impact this is also helpful to know.) 
placeholder: | - << your explanation here >> + << description of how the issue affects you >> validations: required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3cb690fa494c..3805118aa665 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,16 +1,27 @@ - + +### First time committer introduction + -* HIT ALL THE GUIDELINES: - https://numpy.org/devdocs/dev/index.html#guidelines +#### AI Disclosure + diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index 7d2c149629ec..e3da83492e16 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -1,31 +1,21 @@ -../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/_core/src/common/npy_cpu_features.c(451): warning C4098: 'npy__cpu_cpuid': 'void' function returning a value ../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': 
conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(39070): 
warning C4244: 'function': conversion from 'int64_t' to 'double' -cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512' +../numpy/random/src/mt19937/mt19937.c(88): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(92): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(95): warning C4146: unary minus operator applied to unsigned type, result still unsigned +..\numpy\random\src/pcg64/pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(52): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556: - +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26345): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38369): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data 
+numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26345): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38369): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 171f3019883a..ceebf268351d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,9 +4,26 @@ updates: directory: / schedule: interval: daily + cooldown: + default-days: 7 commit-message: prefix: "MAINT" labels: - "03 - Maintenance" ignore: - dependency-name: "bus1/cabuild" + - package-ecosystem: pip + directory: /requirements + schedule: + interval: daily + cooldown: + default-days: 7 + commit-message: + prefix: "MAINT" + labels: + - "03 - Maintenance" + ignore: + - dependency-name: "scipy-openblas32" + - dependency-name: "scipy-openblas64" + - dependency-name: "jupyterlite-pyodide-kernel" + - dependency-name: "sphinx" diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml deleted file mode 100644 index 8ecb3b8a0cdd..000000000000 --- a/.github/windows_arm64_steps/action.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Build Dependencies(Win-ARM64) -description: "Setup LLVM for Win-ARM64 builds" - -runs: - using: "composite" - steps: - - name: Install LLVM with checksum verification - shell: pwsh - run: | - Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe - $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462" - $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash - if ($fileHash -ne $expectedHash) { - Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with." 
- exit 1 - } - Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait - echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c758d05e43ab..7ac309dfff1a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,13 +41,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/init@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/autobuild@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 # â„šī¸ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/analyze@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index bb185282b083..3dc30ebf76f0 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -1,3 +1,7 @@ +# To update pinned container digests and pyenv version (not handled by Dependabot): +# Containers: change tag and get new digest with +# docker pull : && docker inspect --format='{{index .RepoDigests 0}}' : +# pyenv: see https://github.com/pyenv/pyenv/releases name: Test with compiler sanitizers on: @@ -12,6 +16,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -30,14 +35,14 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Set up pyenv run: | - git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + git clone --branch v2.6.25 --depth 1 https://github.com/pyenv/pyenv.git "$HOME/.pyenv" PYENV_ROOT="$HOME/.pyenv" PYENV_BIN="$PYENV_ROOT/bin" PYENV_SHIMS="$PYENV_ROOT/shims" @@ -68,7 +73,7 @@ jobs: pip uninstall -y pytest-xdist - name: Build run: - python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false + python -m spin build -j2 -- -Db_sanitize=address,undefined -Db_lundef=false - name: Test run: | # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures 
them @@ -82,11 +87,11 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest container: - image: ghcr.io/nascheme/numpy-tsan:3.14t + image: ghcr.io/nascheme/numpy-tsan:3.14t@sha256:1ec427e2e480cc373d0fecbf21b8ac590fb94119fb81d18489945cd0afd04dd3 options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy @@ -98,12 +103,45 @@ jobs: run: pip install -U spin - name: Build NumPy with ThreadSanitizer - run: python -m spin build -- -Db_sanitize=thread + run: python -m spin build -j2 -- -Db_sanitize=thread - name: Run tests under prebuilt TSAN container run: | export TSAN_OPTIONS="halt_on_error=0:allocator_may_return_null=1:suppressions=$GITHUB_WORKSPACE/tools/ci/tsan_suppressions.txt" echo "TSAN_OPTIONS=$TSAN_OPTIONS" python -m spin test \ - `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ + `find numpy -name "test*.py" | xargs grep -E -l "import threading|ThreadPoolExecutor" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 + + clang_ASAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + container: + image: ghcr.io/nascheme/cpython-asan:3.14@sha256:b5bfbcdca07e86d22afaf66e3b57959e1c44756452dcabda1efde7711fd0bdde + options: --shm-size=2g # increase memory for large matrix ops + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: Trust working directory and initialize submodules + run: | + git config --global --add safe.directory /__w/numpy/numpy + git submodule update --init --recursive + + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r 
requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + + - name: Build NumPy with AddressSanitizer & LeakSanitizer + run: python -m spin build -j4 -- -Db_sanitize=address,leak + + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + export ASAN_OPTIONS="detect_leaks=1:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" + export LSAN_OPTIONS="suppressions=$GITHUB_WORKSPACE/tools/ci/lsan_suppressions.txt" + python -m spin test -- -v -s --timeout=600 --durations=10 diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 03dc0b41f987..4c2b5db17488 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -8,6 +8,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -22,13 +23,13 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Install Cygwin - uses: egor-tensin/setup-cygwin@d2c752bab416d4b0662591bd366fc2686297c82d # v4 + uses: egor-tensin/setup-cygwin@fca9069f92361187d4abfaa5d8a7490e435d8349 # v4 with: platform: x86_64 install-dir: 'C:\tools\cygwin' @@ -37,7 +38,7 @@ jobs: python-setuptools-wheel liblapack-devel liblapack0 gcc-fortran gcc-g++ git dash cmake ninja - name: Set Windows PATH - uses: egor-tensin/cleanup-path@f04bc953e6823bf491cc0bdcff959c630db1b458 # v4.0.1 + uses: egor-tensin/cleanup-path@8cbbf6af9f8cf1d347258e3f6b850622e480d16d # v5.0.1 with: dirs: 'C:\tools\cygwin\bin;C:\tools\cygwin\lib\lapack' - name: Verify that bash 
is Cygwin bash @@ -67,7 +68,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 28fe31642faa..c658792f7c02 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,10 +15,10 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 + uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 2b7eb24f9812..48f0a3104b50 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' permissions: contents: read # to fetch code (actions/checkout) @@ -29,13 +30,13 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 + - uses: pypa/cibuildwheel@8d2b08b68458a16aeb24b64e68a09ab1c8e82084 # v3.4.1 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git 
a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 7d2edc869893..a75319a7fd18 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -7,13 +7,34 @@ permissions: {} jobs: pr-labeler: + if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest permissions: pull-requests: write # to add labels steps: - name: Label the PR - uses: gerrymanoim/pr-prefix-labeler@c8062327f6de59a9ae1c19f7f07cacd0b976b6fa # v3 continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: github.repository == 'numpy/numpy' + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 + with: + script: | + const yaml = require('js-yaml'); + const {data} = await github.rest.repos.getContent({ + owner: context.repo.owner, + repo: context.repo.repo, + path: '.github/pr-prefix-labeler.yml', + }); + const prefixToLabel = yaml.load( + Buffer.from(data.content, data.encoding).toString() + ); + const title = context.payload.pull_request.title; + for (const [prefix, label] of Object.entries(prefixToLabel)) { + if (title.startsWith(prefix)) { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + labels: [label], + }); + break; + } + } diff --git a/.github/workflows/linux-ibm.yml b/.github/workflows/linux-ibm.yml new file mode 100644 index 000000000000..83d0baeadfd9 --- /dev/null +++ b/.github/workflows/linux-ibm.yml @@ -0,0 +1,92 @@ +name: Linux IBM tests + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + native_ibm: + # These jobs run only in the main NumPy repository. + # They require native ppc64le and s390x GHA runners, which are not available on forks. 
+ # For more details, see: https://github.com/numpy/numpy/issues/29125 + if: github.repository == 'numpy/numpy' + runs-on: ${{ matrix.config.runner }} + + strategy: + fail-fast: false + matrix: + config: + - name: "ppc64le/gcc - baseline(default)" + args: "-Dallow-noblas=false" + runner: ubuntu-24.04-ppc64le-p10 + compiler: "gcc" + - name: "ppc64le/clang - baseline(default)" + args: "-Dallow-noblas=false" + runner: ubuntu-24.04-ppc64le-p10 + compiler: "clang" + - name: "s390x/gcc - baseline(default)" + args: "-Dallow-noblas=false" + runner: ubuntu-24.04-s390x + compiler: "gcc" + - name: "s390x/clang - baseline(default)" + args: "-Dallow-noblas=false" + runner: ubuntu-24.04-s390x + compiler: "clang" + - name: "s390x/gcc - baseline(Z15/VXE2)" + args: "-Dallow-noblas=false -Dcpu-baseline=vxe2" + runner: ubuntu-24.04-s390x + compiler: "gcc" + - name: "s390x/clang - baseline(Z15/VXE2)" + args: "-Dallow-noblas=false -Dcpu-baseline=vxe2" + runner: ubuntu-24.04-s390x + compiler: "clang" + + name: "${{ matrix.config.name }}" + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + submodules: recursive + fetch-tags: true + + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y python3.12 python3-pip python3-dev ninja-build gfortran \ + build-essential libopenblas-dev liblapack-dev pkg-config + pip install --upgrade pip + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt + echo "/home/runner/.local/bin" >> $GITHUB_PATH + + - name: Install clang + if: matrix.config.compiler == 'clang' + run: | + sudo apt install -y clang-20 + echo CC=clang-20 >> $GITHUB_ENV + echo CXX=clang++-20 >> $GITHUB_ENV + + - name: Meson Build + run: | + spin build -- ${{ matrix.config.args }} + + - name: Meson Log + if: always() + run: cat build/meson-logs/meson-log.txt + + - name: Run Tests + run: | + spin test -- --timeout=60 --durations=10 diff --git a/.github/workflows/linux-ppc64le.yml 
b/.github/workflows/linux-ppc64le.yml deleted file mode 100644 index 3817f3ebc0d8..000000000000 --- a/.github/workflows/linux-ppc64le.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Native ppc64le Linux Test - -on: - pull_request: - branches: - - main - - maintenance/** - paths-ignore: - - '**.pyi' - - '**.md' - - '**.rst' - workflow_dispatch: - -permissions: - contents: read - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - native_ppc64le: - # This job runs only in the main NumPy repository. - # It requires a native ppc64le GHA runner, which is not available on forks. - # For more details, see: https://github.com/numpy/numpy/issues/29125 - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-24.04-ppc64le-p10 - name: "Native PPC64LE" - steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Install dependencies - run: | - sudo apt update - sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ - build-essential libopenblas-dev liblapack-dev pkg-config - pip install --upgrade pip - pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - echo "/home/runner/.local/bin" >> $GITHUB_PATH - - - name: Meson Build - run: | - spin build -- -Dallow-noblas=false - - - name: Meson Log - if: always() - run: cat build/meson-logs/meson-log.txt - - - name: Run Tests - run: | - spin test -- --timeout=60 --durations=10 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 9d1c3ac20a45..7d6084c4b4c5 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -1,7 +1,7 @@ name: Linux tests # This file is meant for testing across supported Python versions, build types -# and interpreters (PyPy, python-dbg, a pre-release Python in summer time), +# and interpreters (python-dbg, a pre-release Python in 
summer time), # build-via-sdist, run benchmarks, measure code coverage, and other build # options. @@ -18,6 +18,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -37,14 +38,14 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-depth: 0 persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install linter requirements run: python -m pip install -r requirements/linter_requirements.txt @@ -52,11 +53,11 @@ jobs: env: BASE_REF: ${{ github.base_ref }} run: - python tools/linter.py + spin lint - name: Check Python.h is first file included run: | python tools/check_python_h_first.py - + smoke_test: # To enable this job on a fork, comment out: @@ -66,80 +67,81 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] + version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions - pypy: + debug: needs: [smoke_test] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - name: Install debug Python + uses: deadsnakes/action@e640ac8743173a67cca4d7d77cd837e514bf98e8 # v3.2.0 with: - python-version: 'pypy3.11-v7.3.20' - - name: Setup using scipy-openblas + python-version: '3.14' + debug: true + - name: Install dependencies run: | - python -m pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - - uses: ./.github/meson_actions + python --version + pip install -U pip + pip install -r requirements/build_requirements.txt + pip install -r requirements/test_requirements.txt + - name: Build NumPy debug + run: | + spin build -- -Dbuildtype=debug -Dallow-noblas=true + - name: Run test suite + run: | + spin test -- --timeout=600 --durations=10 - debug: + all_versions: + # like the smoke tests but runs on more Python versions needs: [smoke_test] - runs-on: ubuntu-24.04 - if: github.event_name != 'push' + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + env: + MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" + strategy: + matrix: + version: ["3.13", "3.14", "3.15-dev", "3.15t-dev"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - name: Install debug Python - run: | - sudo apt-get update - sudo apt-get install python3-dbg ninja-build - - name: Build NumPy and install into venv - run: | - python3-dbg -m venv venv - source venv/bin/activate - pip install -U pip - pip install . 
-v -Csetup-args=-Dbuildtype=debug -Csetup-args=-Dallow-noblas=true - - name: Install test dependencies - run: | - source venv/bin/activate - pip install -r requirements/test_requirements.txt - - name: Run test suite - run: | - source venv/bin/activate - cd tools - pytest --timeout=600 --durations=10 --pyargs numpy -m "not slow" + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 + with: + python-version: ${{ matrix.version }} + - uses: ./.github/meson_actions full: # Install as editable, then run the full test suite with code coverage needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -172,7 +174,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -217,14 +219,14 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: 
'3.12' - name: Install build and benchmarking dependencies run: | sudo apt-get update @@ -256,14 +258,14 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install gfortran and setup OpenBLAS (sdist build) run: | set -xe @@ -288,6 +290,7 @@ jobs: run: | sudo apt update sudo apt install make swig + pip install setuptools make -C tools/swig/test test array_api_tests: @@ -296,23 +299,23 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: repository: data-apis/array-api-tests - ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 + ref: '41379d15d26d67a1e66c840e775d41a8a7fb1516' # v2026.02.26 submodules: 'true' path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | python -m pip install -r requirements/build_requirements.txt @@ -334,15 +337,15 @@ jobs: runs-on: ubuntu-latest if: 
github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + python-version: '3.12' + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: repository: numpy/numpy-release path: numpy-release @@ -354,7 +357,7 @@ jobs: run: | pip install -r requirements/build_requirements.txt pip install -r requirements/test_requirements.txt - pip install vulture + pip install "vulture!=2.15" - name: Build and install NumPy run: | # Install using the fastest way to build (no BLAS, no SIMD) @@ -368,7 +371,7 @@ jobs: - name: Check for unreachable code paths in Python modules run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches - bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + bash -c "! vulture . 
--min-confidence 100 --exclude doc/,vendored-meson/ | grep 'unreachable'" - name: Check usage of install_tag run: | rm -rf build-install @@ -376,8 +379,8 @@ jobs: python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests - Linux_Python_311_32bit_full: - name: i686, cp311, full + Linux_Python_312_32bit_full: + name: i686, cp312, full needs: [smoke_test] runs-on: ubuntu-latest container: @@ -403,7 +406,7 @@ jobs: - name: build run: | - python3.11 -m venv venv + python3.12 -m venv venv source venv/bin/activate pip install --upgrade pip pip install -r requirements/ci32_requirements.txt diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 77cb9aaf91fe..aaed39e21e84 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -44,6 +44,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -69,14 +70,14 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -127,7 +128,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -162,7 +163,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -192,14 +193,14 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -220,14 +221,14 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -256,7 +257,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -274,7 +275,7 @@ jobs: run: | # do not use test_requirements.txt, it includes coverage which requires # sqlite3, which is not available on OpenSUSE python - pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install --break-system-packages 
pytest pytest-xdist hypothesis pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -283,14 +284,14 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -347,14 +348,14 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -384,14 +385,14 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index e4688e50e5bc..f71eb304a9bb 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -1,3 +1,8 @@ +# 
To update pinned container digests and uv version: not handled by Dependabot. +# Containers: change tag and get new digest with +# docker pull : && docker inspect --format='{{index .RepoDigests 0}}' : +# uv: change version in URL and update checksum (curl -sL | sha256sum) +# # Meson's Python module doesn't support crosscompiling, # and python dependencies may be another potential hurdle. # There might also be a need to run runtime tests during configure time. @@ -18,6 +23,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' workflow_dispatch: defaults: @@ -42,41 +48,6 @@ jobs: fail-fast: false matrix: BUILD_PROP: - - [ - "ppc64le", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - - [ - "ppc64le - baseline(Power9)", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vsx3", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - - [ - "s390x", - "s390x-linux-gnu", - "s390x/ubuntu:22.04", - "-Dallow-noblas=true", - # Skipping TestRationalFunctions.test_gcd_overflow test - # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. - # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", - "s390x" - ] - - [ - "s390x - baseline(Z13)", - "s390x-linux-gnu", - "s390x/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", - "s390x" - ] - [ "riscv64", "riscv64-linux-gnu", @@ -95,7 +66,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -103,8 +74,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - # see https://hub.docker.com/r/tonistiigi/binfmt for available versions - docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all + docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52@sha256:1b804311fe87047a4c96d38b4b3ef6f62fca8cd125265917a9e3dc3c996c39e6 --install all - name: Install GCC cross-compilers run: | @@ -112,7 +82,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -124,7 +94,11 @@ jobs: docker run --platform=linux/${ARCH} --name the_container --interactive \ -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && - apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && + apt install -y cmake git curl ca-certificates && + curl -LsSf https://astral.sh/uv/0.10.8/install.sh -o /tmp/uv-install.sh && + echo 'eae5e1dae89cd0b74d357f549ccd6faa94b2ad6c1d89d78972a625655a4556ae /tmp/uv-install.sh' | sha256sum -c - && + sh /tmp/uv-install.sh && + export 
PATH="/root/.local/bin:$PATH" && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && rm -rf /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && @@ -140,8 +114,9 @@ jobs: git config --global --add safe.directory /numpy && # No need to build ninja from source, the host ninja is used for the build grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && - python -m pip install -r /tmp/build_requirements.txt && - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis pytest-timeout rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container @@ -157,7 +132,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -170,7 +145,7 @@ jobs: -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' export F90=/usr/bin/gfortran - cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" '" @@ -187,7 +162,7 @@ jobs: - [ "loongarch64", "loongarch64-linux-gnu", - "cnclarechen/numpy-loong64-debian:v1", + "cnclarechen/numpy-loong64-debian:v1@sha256:1f35e614955fa9cba890172a73a068120f25f1cfcd59ad5521995e37cb7e2c3f", "-Dallow-noblas=true", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", 
"loong64" @@ -202,14 +177,14 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56 --install all + docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56@sha256:30cc9a4d03765acac9be2ed0afc23af1ad018aed2c28ea4be8c2eb9afe03fbd1 --install all - name: Install GCC cross-compilers run: | @@ -217,7 +192,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -232,6 +207,8 @@ jobs: ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/lib/gcc-cross/${TOOLCHAIN_NAME} /usr/lib/gcc/${TOOLCHAIN_NAME} && + mkdir -p /usr/libexec/gcc && + rm -rf /usr/libexec/gcc/${TOOLCHAIN_NAME} && ln -s /host/usr/libexec/gcc/${TOOLCHAIN_NAME} /usr/libexec/gcc/${TOOLCHAIN_NAME} && rm -f /usr/bin/gcc && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gcc-14 /usr/bin/gcc && rm -f /usr/bin/g++ && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-g++-14 /usr/bin/g++ && rm -f /usr/bin/gfortran && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gfortran-14 /usr/bin/gfortran && @@ -241,8 +218,14 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install --break-system-packages -r /numpy/requirements/build_requirements.txt && - python -m pip 
install --break-system-packages pytest pytest-xdist hypothesis typing_extensions + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install --break-system-packages uv --extra-index-url https://mirrors.loong64.com/pypi/simple && + export PATH="/root/.local/bin:$PATH" && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis && + rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container mkdir -p "~/docker_${TOOLCHAIN_NAME}" @@ -257,7 +240,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -269,5 +252,5 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index dcc483eaf6df..0293d95b9aa2 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -1,5 +1,9 @@ name: Linux SIMD tests +# To update Intel SDE (not handled by Dependabot): download new version from +# https://www.intel.com/content/www/us/en/developer/articles/tool/software-development-emulator.html +# and update SDE_URL and SDE_SHA256 (curl -sL | sha256sum) +# # This file is meant for testing different SIMD-related 
build options and # optimization levels. See `meson_options.txt` for the available build options. # @@ -35,6 +39,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -58,14 +63,14 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - uses: ./.github/meson_actions name: Build/Test @@ -76,14 +81,14 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install GCC9/10 run: | @@ -123,13 +128,13 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | python -m pip install -r requirements/build_requirements.txt -r 
requirements/test_requirements.txt @@ -157,12 +162,12 @@ jobs: - [ "native", "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none", - "3.11" + "3.12" ] - [ "without avx512", "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", - "3.11" + "3.12" ] env: @@ -170,12 +175,12 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -185,18 +190,21 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz + SDE_URL="https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz" + SDE_SHA256="f849acecad4c9b108259c643b2688fd65c35723cd23368abe5dd64b917cc18c0" + curl -o /tmp/sde.tar.xz "$SDE_URL" + echo "$SDE_SHA256 /tmp/sde.tar.xz" | sha256sum -c - mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde @@ -235,18 +243,21 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz + SDE_URL="https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz" + SDE_SHA256="f849acecad4c9b108259c643b2688fd65c35723cd23368abe5dd64b917cc18c0" + curl -o /tmp/sde.tar.xz "$SDE_URL" + echo "$SDE_SHA256 /tmp/sde.tar.xz" | sha256sum -c - mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 2787a7c81d3a..9f59bb9f7002 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' permissions: contents: read # to fetch code (actions/checkout) @@ -32,7 +33,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -49,7 +50,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -58,7 +59,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: 
conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 + uses: conda-incubator/setup-miniconda@fc2d68f6413eb2d87b895e92f8584b5b94a10167 # v3.3.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge @@ -73,7 +74,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -118,16 +119,16 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t"] + version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.version }} @@ -153,5 +154,5 @@ jobs: - name: Test in multiple threads if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} run: | - pip install pytest-run-parallel==0.7.0 - spin test -p 4 -- --timeout=600 --durations=10 + pip install pytest-run-parallel==0.8.2 + spin test -p 4 -- -sv --timeout=600 --durations=10 diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 03b471b10c63..4572d84aa884 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -28,20 +28,21 @@ jobs: shard-index: [0] # e.g. 
change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: "3.12" - name: Install dependencies - run: pip install git+https://github.com/hauntsaninja/mypy_primer.git + # To update: replace commit hash (no tags/releases available, use HEAD of master) + run: pip install git+https://github.com/hauntsaninja/mypy_primer.git@05f73ec3d85bb4f55676f3c57f2c3e5136228977 # HEAD of master on 2026-03-04 - name: Run mypy_primer shell: bash run: | cd numpy_to_test - MYPY_VERSION=$(grep mypy== requirements/test_requirements.txt | sed -n 's/mypy==\([^;]*\).*/\1/p') + MYPY_VERSION=$(grep mypy== requirements/typing_requirements.txt | sed -n 's/mypy==\([^;]*\).*/\1/p') echo "new commit" git checkout $GITHUB_SHA @@ -65,6 +66,7 @@ jobs: --known-dependency-selector numpy \ --old-prepend-path numpy_base --new-prepend-path numpy_to_test \ --num-shards 1 --shard-index ${{ matrix.shard-index }} \ + --additional-flags="--python-version=3.12" \ --debug \ --output concise \ | tee diff_${{ matrix.shard-index }}.txt @@ -74,7 +76,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -82,7 +84,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: 
actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -96,7 +98,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact/merge@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: mypy_primer_diffs pattern: mypy_primer_diffs-* diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 9f4cda234717..68a0978e7a12 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -18,7 +18,7 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} steps: - name: Download diffs - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: script: | const fs = require('fs'); @@ -42,14 +42,14 @@ jobs: - name: Get PR number id: get-pr-number - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: script: | const fs = require('fs'); return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@3580fff2b9b7c0e16466686530622f0eed93132a # v1.47.0 + uses: int128/hide-comment-action@42badf94b3efd95bf2138bd9c74da19203e83f40 # v1.55.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} @@ -58,7 +58,7 @@ jobs: - name: Post comment id: post-comment - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/pixi-packages.yml 
b/.github/workflows/pixi-packages.yml new file mode 100644 index 000000000000..8408ae032bd3 --- /dev/null +++ b/.github/workflows/pixi-packages.yml @@ -0,0 +1,47 @@ +name: Pixi packages tests + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + build_packages: + name: Build Pixi packages + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + runs-on: [ubuntu-latest, ubuntu-24.04-arm, macos-latest] + package_variant: + - default + - freethreading + - asan + # TSan CI runs for longer than 30 minutes due to slow + # stdlib bytecode compilation step. Disable or parallelize + # byte compilation once rattler-build has support. + # See https://github.com/prefix-dev/pixi/pull/5737 + # - tsan-freethreading + if: github.repository == 'numpy/numpy' + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: prefix-dev/setup-pixi@1b2de7f3351f171c8b4dfeb558c639cb58ed4ec0 # v0.9.5 + with: + pixi-version: v0.64.0 + run-install: false + + - name: Build + run: pixi build --path="pixi-packages/${{ matrix.package_variant }}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d8daf8779d92..6812aceca602 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v3.1.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v3.1.0 with: persist-credentials: false @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: SARIF file path: results.sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.1.27 + uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 731d9091ec69..295ca31f3e13 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -14,7 +14,7 @@ on: - ".github/workflows/stubtest.yml" - "numpy/**" - "!numpy/**/tests/**" - - "requirements/test_requirements.txt" + - "requirements/typing_requirements.txt" - "tools/stubtest/**" workflow_dispatch: @@ -34,28 +34,28 @@ jobs: matrix: # TODO: consider including macos and windows os: [ubuntu] - py: ["3.11", "3.14"] + py: ["3.12", "3.14"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 + - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 with: python-version: ${{ matrix.py }} activate-environment: true cache-dependency-glob: | requirements/build_requirements.txt - requirements/test_requirements.txt + requirements/typing_requirements.txt - name: uv pip install run: >- uv pip install -r requirements/build_requirements.txt - -r requirements/test_requirements.txt + -r requirements/typing_requirements.txt - name: spin build run: spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv diff --git 
a/.github/workflows/mypy.yml b/.github/workflows/typecheck.yml similarity index 66% rename from .github/workflows/mypy.yml rename to .github/workflows/typecheck.yml index e3c6e7beba13..6f6ce50b9440 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/typecheck.yml @@ -1,4 +1,4 @@ -name: Run MyPy +name: Type-checking # Mypy is too slow to run as part of regular CI. The purpose of the jobs in # this file is to cover running Mypy across: @@ -53,32 +53,53 @@ jobs: matrix: os_python: - [macos-latest, '3.14'] - - [ubuntu-latest, '3.12'] - - [windows-latest, '3.11'] + - [ubuntu-latest, '3.13'] + - [windows-latest, '3.12'] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 + - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true cache-dependency-glob: | requirements/build_requirements.txt - requirements/test_requirements.txt + requirements/typing_requirements.txt - name: Install dependencies # orjson makes mypy faster but the default requirements.txt # can't install it because orjson doesn't support 32 bit Linux run: >- uv pip install -r requirements/build_requirements.txt - -r requirements/test_requirements.txt + -r requirements/typing_requirements.txt orjson + basedpyright - name: Build run: | spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv - name: Run Mypy run: | spin mypy + - name: Check basedpyright's type completeness is at least 100% + run: >- + spin run python tools/pyright_completeness.py + --verifytypes numpy + --ignoreexternal + --exclude-like '*.tests.*' '*.conftest.*' + + pyrefly: + runs-on: ubuntu-latest + steps: + - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 + with: + activate-environment: true + - name: Install dependencies + run: >- + uv pip install + -r requirements/typing_requirements.txt + - name: Run pyrefly + run: pyrefly check diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 24061ae7b014..9ef380bba68c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -13,6 +13,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' workflow_dispatch: concurrency: @@ -42,13 +43,13 @@ jobs: - [macos-14, macosx_arm64, openblas] - [windows-2022, win_amd64, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311"] + python: ["cp312"] env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: true persist-credentials: false @@ -59,14 +60,17 @@ jobs: with: architecture: 'x86' - - name: Setup LLVM for Windows ARM64 + - name: win_arm64 - set environment variables if: ${{ matrix.buildplat[1] == 'win_arm64' }} - uses: ./.github/windows_arm64_steps + run: | + echo "C:\Program Files\LLVM\bin" >> $env:GITHUB_PATH + echo "CC=clang-cl" >> $env:GITHUB_ENV + echo "CXX=clang-cl" >> $env:GITHUB_ENV + echo "FC=flang" >> $env:GITHUB_ENV - name: pkg-config-for-win if: runner.os == 'windows' run: | - choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite $CIBW = "${{ github.workspace }}/.openblas" # pkgconfig needs a complete path, and not just "./openblas since the # build is run in a tmp dir (?)
@@ -98,11 +102,11 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 + uses: pypa/cibuildwheel@8d2b08b68458a16aeb24b64e68a09ab1c8e82084 # v3.4.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a0604fa87aa9..028cc221ecd8 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -25,14 +26,14 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Setup Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: "3.14t" @@ -42,7 +43,7 @@ jobs: - name: Install pkg-config run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + python -m pip install pkgconf==2.5.1.post1 echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - name: Install NumPy (Clang-cl) @@ -83,16 +84,16 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive 
fetch-tags: true persist-credentials: false - name: Setup Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: - python-version: '3.11' + python-version: '3.12' architecture: ${{ matrix.architecture }} - name: Setup MSVC @@ -133,37 +134,38 @@ jobs: pyver: '3.14' - BLAS: 32 TEST_MODE: fast - pyver: '3.11' + pyver: '3.12' # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Setup Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.pyver }} - name: pkg-config run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + python -m pip install pkgconf==2.5.1.post1 - name: Dependencies run: | python -m pip install -r requirements/test_requirements.txt + python -m pip install -r requirements/build_requirements.txt - name: Build and install run: | pip install -r requirements/ci_requirements.txt spin config-openblas --with-scipy-openblas=${{ matrix.BLAS }} $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" + python -m pip install --no-build-isolation . 
-v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" - name: Run test suite ${{ matrix.TEST_MODE }} run: | diff --git a/.gitignore b/.gitignore index b54de4091bf3..de556eb17de3 100644 --- a/.gitignore +++ b/.gitignore @@ -109,6 +109,73 @@ doc/source/**/generated/ doc/source/release/notes-towncrier.rst doc/source/.jupyterlite.doit.db +# AI coding agent configuration files # +####################################### +# Personal/local config — not shared with the project. +# Remove entries below if the project decides to check in shared +# project-wide instructions (e.g. a team CLAUDE.md or AGENTS.md). + +# Claude Code (Anthropic) +CLAUDE.md +CLAUDE.local.md +.claude/ +.claudeignore + +# AGENTS.md (cross-tool standard: Codex, Cursor, Cline, Windsurf, etc.) +AGENTS.md +AGENTS.override.md + +# OpenAI Codex +.codex/ + +# Cursor +.cursorrules +.cursorignore +.cursor/ + +# Windsurf / Codeium +.windsurfrules +.windsurf/ +.codeiumignore + +# Warp +WARP.md + +# Google Gemini CLI +GEMINI.md +.gemini/ +.geminiignore + +# Google Jules +JULES.md + +# JetBrains Junie +.junie/ + +# Cline +.clinerules +.clinerules/ + +# Roo Code +.roorules +.roo/ + +# KiloCode +.kilocoderules +.kilocode/ + +# Aider +.aider* +.aider.conf.yml + +# Continue.dev +.continue/ + +# Generic / cross-tool AI ignore files +.aiignore +.aiexclude +.uignore + # Things specific to this project # ################################### benchmarks/results diff --git a/.mailmap b/.mailmap index 18cfb272618f..1a906d065f47 100644 --- a/.mailmap +++ b/.mailmap @@ -18,6 +18,7 @@ !LSchroefl <65246829+LSchroefl@users.noreply.github.com> !Lbogula !Lisa <34400837+lyzlisa@users.noreply.github.com> +!MyUserNameWasTakenLinux !Patrick <39380924+xamm@users.noreply.github.com> !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays @@ -34,9 +35,11 @@ !h-vetinari !h6197627 <44726212+h6197627@users.noreply.github.com> !hutauf +!ianlv <168640168+ianlv@users.noreply.github.com> !jbCodeHub !juztamau5 !karl3wm 
+!kostayScr <11485271+kostayScr@users.noreply.github.com> !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -49,6 +52,7 @@ !mykykh <49101849+mykykh@users.noreply.github.com> !nullSoup <34267803+nullSoup@users.noreply.github.com> !ogidig5 <82846833+ogidig5@users.noreply.github.com> +!olivier !partev !pkubaj !pmvz @@ -69,10 +73,13 @@ !yan-wyb !yetanothercheer Aaron Baecker -Adrin Jalali Abhishek Kumar Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> +Abhishek Tiwari <27881020+Abhi210@users.noreply.github.com> Abraham Medina +Adrin Jalali +Akhil Kannan +Akhil Kannan <143798318+Alverok@users.noreply.github.com> Arun Kota Arun Kota Arun Kota Aarthi Agurusa @@ -94,6 +101,7 @@ Aerik Pawson <45904740+aerikpawson@users.noreply.github.com> Ahmet Can Solak Amrit Krishnan Amrit Krishnan +Ankit Ahlawat Alban Desmaison Albert Jornet Puig Alberto Rubiales @@ -121,6 +129,7 @@ Alok Singhal Alok Singhal Alyssa Quek Andrea Bianchi Andrea Bianchi andrea-bia +Anik Chand <161185149+anikchand461@users.noreply.github.com> Ankit Dwivedi Ankit Dwivedi Ankur Singh @@ -137,6 +146,7 @@ Andreas Schwab Andrei Kucharavy Andrej Zhilenkov Andrew Lawson +Aniket Singh Yadav <148300120+Aniketsy@users.noreply.github.com> Anirudh Subramanian Anne Archibald Anne Archibald @@ -216,6 +226,7 @@ Chris Navarro <24905907+lvllvl@users.noreply.github.com Chris Vavaliaris Christian Barbia Christian Clauss +Christine P. 
Chai Christopher Dahlin Christopher Hanley Christoph Buchner @@ -253,6 +264,7 @@ David Pitchford David Prosin Davide Dal Bosco <62077652+davidedalbosco@users.noreply.github.com> Dawid Zych +Dennis Van de Vorst <87502756+dvorst@users.noreply.github.com> Dennis Zollo Derek Homeier Derek Homeier @@ -260,6 +272,7 @@ Derek Homeier Devin Shanahan Daval Parmar <53395856+DhavalParmar61@users.noreply.github.com> +Diego Atencia <53157128+alektebel@users.noreply.github.com> Digya Acharya Dima Pasechnik Dima Pasechnik @@ -271,6 +284,7 @@ D.J. Ramones <50655786+djramones@users.noreply.github.com> Dmitriy Shalyga Dmitry Belov Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> +Diya Singh Dylan Cutler Ed Schofield Egor Zindy @@ -313,6 +327,9 @@ Greg Young Greg Young Gregory R. Lee Gregory R. Lee +Gubaydullin Danis +Gubaydullin Danis <96629796+DanisNone@users.noreply.github.com> +Guido Imperiale Guo Ci guoci Guo Shuai Gyeongjae Choi @@ -385,6 +402,7 @@ JessÊ Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (険äģ˛č‚¯) Jhong-Ken Chen (険äģ˛č‚¯) <37182101+kennychenfs@users.noreply.github.com> +Jingu Kang Jiuding Tan (谭九éŧŽ) <109224573@qq.com> Johann Faouzi Johann Rohwer @@ -431,6 +449,7 @@ Karan Dhir Karel Planken <71339309+kplanken@users.noreply.github.com> Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> Karthik Kaiplody +Khelf Mohamed Keller Meier Kenny Huynh Kevin Granados @@ -442,6 +461,7 @@ Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso Kira Prokopenko +Koki Watanabe Konrad Kapp Kristoffer Pedersen Kristoffer Pedersen @@ -545,6 +565,9 @@ Mircea Akos Bruma <35742861+Mitchell-Faas@users.noreply.github.com> Mohammed Abdul Rahman Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> +Muhammad Maaz +Muhammad Maaz <76714503+mmaaz-git@users.noreply.github.com> +Mohammed Zuhaib <56065368+zuhu2195@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin 
@@ -585,6 +608,7 @@ Panos Mavrogiorgos Pantelis Antonoudiou Pantelis Antonoudiou Pat Miller patmiller +Paul Caprioli Paul Ivanov Paul Ivanov Paul Jacobson @@ -601,6 +625,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Phoenix Studio <59125767+phoenixstudiodz@users.noreply.github.com> Filipe Laíns Pierre GM Pierre GM pierregm @@ -612,14 +637,18 @@ Prathmesh Shirsat <55539563+Fayyr@users.noreply.github.com> Prithvi Singh Prithvi Singh <42640176+prithvitewatia@users.noreply.github.com> Przemyslaw Bartosik +Raghuveer Devulapalli Raghuveer Devulapalli -Raghuveer Devulapalli <44766858+r-devulap@users.noreply.github.com> +Raghuveer Devulapalli <447668+r-devulap@users.noreply.github.com> Rajas Rade lkdmttg7 Rakesh Vasudevan +Ralf BÃŧrkle <214435818+polaris-3@users.noreply.github.com> Ralf Gommers Ralf Gommers rgommers Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> +Riku Sakamoto +Riku Sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Ritta Narita Riya Sharma Rob Timpe @@ -635,6 +664,7 @@ Roman Yurchak Ronan Lamy Ronan Lamy Rostan Tabet Roy Jacobson +Rupesh Sharma <206439536+Rupeshhsharma@users.noreply.github.com> Russell Hewett Ryan Blakemore Ryan Polley @@ -661,6 +691,7 @@ Schrijvers Luc Sean Cheah Sean Cheah <67928790+thalassemia@users.noreply.github.com> Sebastian Berg +Sebastian Berg Sebastian Schleehauf Serge Guelton Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> @@ -670,6 +701,7 @@ Shubham Gupta Shubham Gupta <63910248+shubham11941140@users.noreply.github.com> Shekhar Prasad Rajak Shen Zhou +Shirong Wang Shreya Singh Shota Kawabuchi Siavash Eliasi @@ -696,6 +728,8 @@ Stuart Archibald Stuart Archibald SUMIT SRIMANI <2301109104@ptuniv.edu.in SuryaChand P +Swayam Singh +Swayam Singh Sylvain Ferriol Takanori Hirano Theodoros Nikolaou @@ -704,6 +738,7 @@ Talha Mohsin <131553190+talhabm@users.noreply.github.com Thomas A Caswell Thomas Kluyver Thomas Orgis +Timileyin Daso Tim Cera Tim Teichmann Tim Teichmann 
<44259103+tteichmann@users.noreply.github.com> @@ -720,6 +755,7 @@ Travis Oliphant Travis Oliphant Travis Oliphant Vahid Tavanashad <120411540+vtavana@users.noreply.github.com> +Varad Raj Singh Valentin Haenel Valentin Haenel Vardhaman Kalloli <83634399+cyai@users.noreply.github.com> @@ -745,6 +781,8 @@ Wim Glenn Wojtek Ruszczewski Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> Xiangyi Wang +Xiaoyu +Xiaoyu Yamada Fuyuka Yang Hau Yang Hau diff --git a/.spin/cmds.py b/.spin/cmds.py index ea62717e4f78..12832fb3b24d 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -9,8 +9,6 @@ import spin from spin.cmds import meson -IS_PYPY = (sys.implementation.name == 'pypy') - # Check that the meson git submodule is present curdir = pathlib.Path(__file__).parent meson_import_dir = curdir.parent / 'vendored-meson' / 'meson' / 'mesonbuild' @@ -79,10 +77,14 @@ def changelog(token, revision_range): help="Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel" ) @spin.util.extend_command(spin.cmds.meson.build) -def build(*, parent_callback, with_scipy_openblas, **kwargs): +def build(*, parent_callback, meson_args, with_scipy_openblas, **kwargs): if with_scipy_openblas: _config_openblas(with_scipy_openblas) - parent_callback(**kwargs) + + # Avoid byte-compiling on every rebuild/reinstall, that's very expensive + meson_args += ("-Dpython.bytecompile=-1",) + + parent_callback(**{'meson_args': meson_args, **kwargs}) @spin.util.extend_command(spin.cmds.meson.docs) @@ -129,10 +131,7 @@ def docs(*, parent_callback, **kwargs): jobs_param = next(p for p in docs.params if p.name == 'jobs') jobs_param.default = 1 -if IS_PYPY: - default = "not slow and not slow_pypy" -else: - default = "not slow" +default = "not slow" @click.option( "-m", @@ -159,7 +158,7 @@ def test(*, parent_callback, pytest_args, tests, markexpr, parallel_threads, **k When pytest-run-parallel is avaliable, use `spin test -p auto` or `spin test -p ` to run tests sequentional in parallel threads. 
- """ # noqa: E501 + """ if (not pytest_args) and (not tests): pytest_args = ('--pyargs', 'numpy') @@ -206,10 +205,10 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): - This command only doctests public objects: those which are accessible from the top-level `__init__.py` file. - """ # noqa: E501 + """ try: # prevent obscure error later - import scipy_doctest # noqa: F401 + import scipy_doctest except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e if scipy_doctest.__version__ < '1.8.0': @@ -252,7 +251,7 @@ def check_tutorials(*, parent_callback, pytest_args, **kwargs): - This command only doctests public objects: those which are accessible from the top-level `__init__.py` file. - """ # noqa: E501 + """ # handle all of # - `spin check-tutorials` (pytest_args == ()) # - `spin check-tutorials path/to/rst`, and @@ -397,6 +396,11 @@ def lint(ctx, fix): help="The factor above or below which a benchmark result is " "considered reportable. This is passed on to the asv command." ) +@click.option( + '--cpu-affinity', default=None, multiple=False, + help="Set CPU affinity for running the benchmark, in format: 0 or 0,1,2 or 0-3." + "Default: not set" +) @click.argument( 'commits', metavar='', required=False, @@ -404,7 +408,8 @@ def lint(ctx, fix): ) @meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): +def bench(ctx, tests, compare, verbose, quick, factor, cpu_affinity, + commits, build_dir): """🏋 Run benchmarks. 
\b @@ -447,6 +452,9 @@ def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): if quick: bench_args = ['--quick'] + bench_args + if cpu_affinity: + bench_args += ['--cpu-affinity', cpu_affinity] + if not compare: # No comparison requested; we build and benchmark the current version @@ -535,6 +543,7 @@ def ipython(*, ipython_args, build_dir): def mypy(ctx): """đŸĻ† Run Mypy tests for NumPy """ + ctx.invoke(build) env = os.environ env['NPY_RUN_MYPY_IN_TESTSUITE'] = '1' ctx.params['pytest_args'] = [os.path.join('numpy', 'typing')] @@ -542,6 +551,13 @@ def mypy(ctx): ctx.forward(test) +@click.command() +def pyrefly() -> None: + """đŸĒ˛ Type-check the stubs with Pyrefly + """ + spin.util.run(['pyrefly', 'check']) + + @click.command() @click.option( '--concise', @@ -565,17 +581,13 @@ def stubtest(*, concise: bool, build_dir: str) -> None: stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' - allowlists = [stubtest_dir / 'allowlist.txt'] - if sys.version_info < (3, 12): - allowlists.append(stubtest_dir / 'allowlist_py311.txt') - else: - allowlists.append(stubtest_dir / 'allowlist_py312.txt') + allowlist = stubtest_dir / 'allowlist.txt' cmd = [ 'stubtest', '--ignore-disjoint-bases', f'--mypy-config-file={mypy_config}', - *(f'--allowlist={allowlist}' for allowlist in allowlists), + f'--allowlist={allowlist}', ] if concise: cmd.append('--concise') diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 0919790c65d1..15e8f7546cc2 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -16,3 +16,4 @@ to all interactions, including issues and PRs. For more, please read https://www.numpy.org/devdocs/dev/index.html Thank you for contributing, and happy coding! + diff --git a/INSTALL.rst b/INSTALL.rst index 6e9d2cd242f5..72caf98380b7 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,7 +14,7 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.11.x or newer. +1) Python__ 3.12.x or newer. 
Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and @@ -82,7 +82,7 @@ Choosing compilers NumPy needs C and C++ compilers, and for development versions also needs Cython. A Fortran compiler isn't needed to build NumPy itself; the ``numpy.f2py`` tests will be skipped when running the test suite if no Fortran -compiler is available. +compiler is available. For more options including selecting compilers, setting custom compiler flags and controlling parallelism, see diff --git a/README.md b/README.md index 7bf1e13346ce..344631bc5601 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ NumPy is the fundamental package for scientific computing with Python. - **Source code:** https://github.com/numpy/numpy - **Contributing:** https://numpy.org/devdocs/dev/index.html - **Bug reports:** https://github.com/numpy/numpy/issues -- **Report a security vulnerability:** https://tidelift.com/docs/security +- **Report a security vulnerability:** https://github.com/numpy/numpy/security/policy (via Tidelift) It provides: diff --git a/benchmarks/benchmarks/bench_alloc_cache.py b/benchmarks/benchmarks/bench_alloc_cache.py new file mode 100644 index 000000000000..48793caeea7f --- /dev/null +++ b/benchmarks/benchmarks/bench_alloc_cache.py @@ -0,0 +1,45 @@ +"""Benchmarks for the NumPy small-allocation cache. + +NumPy caches data allocations smaller than 1024 bytes (up to 7 per size +bucket) to avoid repeated malloc/free calls. For float64 arrays this +means arrays with fewer than 128 elements hit the cache. + +These benchmarks measure tight create-and-discard loops so that the +allocator cache is exercised on every iteration after the first. +""" + +import numpy as np + +from .common import Benchmark + + +class SmallArrayCreation(Benchmark): + # Sizes chosen so that data bytes = size * 8 (float64). 
+ # Cached: 1..127 → 8..1016 bytes (< 1024, hits the cache) + # Uncached: 128+ → 1024+ bytes (bypasses the cache) + params = [[1, 4, 16, 64, 127, 128, 256, 512]] + param_names = ['size'] + timeout = 60 + + def setup(self, size): + self.dtype = np.float64 + + def time_empty_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.empty(size, dtype=dt) + + def time_full_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.full(size, 1.0, dtype=dt) + + def time_ones_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.ones(size, dtype=dt) + + def time_zeros_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.zeros(size, dtype=dt) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 33e2871fc727..11d454ae41bf 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -2,6 +2,8 @@ import string +from asv_runner.benchmarks.mark import SkipNotImplemented + import numpy as np from .common import Benchmark @@ -189,6 +191,51 @@ def time_unique_all(self, array_size, percent_nans, return_inverse=True, return_counts=True) +class UniqueIntegers(Benchmark): + """Benchmark for np.unique with integer dtypes.""" + + param_names = ["array_size", "num_unique_values", "dtype"] + params = [ + # sizes of the 1D arrays + [200, 100000, 1000000], + # number of unique values in arrays + [25, 125, 5000, 50000, 250000], + # dtypes of the arrays + [np.uint8, np.int16, np.uint32, np.int64], + ] + + def setup(self, array_size, num_unique_values, dtype): + unique_array = np.arange(num_unique_values, dtype=dtype) + base_array = np.resize(unique_array, array_size) + rng = np.random.default_rng(121263137472525314065) + rng.shuffle(base_array) + self.arr = base_array + + def time_unique_values(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, 
return_index=False, + return_inverse=False, return_counts=False) + + def time_unique_counts(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=True,) + + def time_unique_inverse(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=True, return_counts=False) + + def time_unique_all(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=True, + return_inverse=True, return_counts=True) + + class Isin(Benchmark): """Benchmarks for `numpy.isin`.""" diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 49a7ae84fde6..6c8f4b15e102 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -252,7 +252,8 @@ class MatmulStrided(Benchmark): def __init__(self): self.args_map = { - 'matmul_m%03d_p%03d_n%03d_bs%02d' % arg: arg for arg in self.args + f'matmul_m{arg[0]:03}_p{arg[1]:03}_n{arg[2]:03}_bs{arg[3]:02}': arg + for arg in self.args } self.params = [list(self.args_map.keys())] diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py new file mode 100644 index 000000000000..86cc625161d8 --- /dev/null +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -0,0 +1,35 @@ +import numpy as np + +from .common import Benchmark + + +class SearchSorted(Benchmark): + params = [ + [100, 10_000, 1_000_000, 100_000_000], # array sizes + [1, 10, 100_000], # number of query elements + ['ordered', 'random'], # query order + [False, True], # use sorter + [42, 18122022], 
# seed + ] + param_names = ['array_size', 'n_queries', 'query_order', 'use_sorter', 'seed'] + + def setup(self, array_size, n_queries, query_order, use_sorter, seed): + self.arr = np.arange(array_size, dtype=np.int32) + + rng = np.random.default_rng(seed) + + low = -array_size // 10 + high = array_size + array_size // 10 + + self.queries = rng.integers(low, high, size=n_queries, dtype=np.int32) + if query_order == 'ordered': + self.queries.sort() + + if use_sorter: + rng.shuffle(self.arr) + self.sorter = self.arr.argsort() + else: + self.sorter = None + + def time_searchsorted(self, array_size, n_queries, query_order, use_sorter, seed): + np.searchsorted(self.arr, self.queries, sorter=self.sorter) diff --git a/building_with_meson.md b/building_with_meson.md index 6498d3659bb0..602e80756fe5 100644 --- a/building_with_meson.md +++ b/building_with_meson.md @@ -14,9 +14,6 @@ have rough edges, please open an issue if you run into a problem._ *Note: also make sure you have `pkg-config` and the usual system dependencies for NumPy* -Then install spin: -- `python -m pip install spin` - **Compile and install:** `spin build` This builds in the `build/` directory, and installs into the `build-install` directory. diff --git a/doc/DISTUTILS.rst b/doc/DISTUTILS.rst deleted file mode 100644 index 142c15a7124a..000000000000 --- a/doc/DISTUTILS.rst +++ /dev/null @@ -1,622 +0,0 @@ -.. -*- rest -*- - -NumPy distutils - users guide -============================= - -.. contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy._core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. 
- -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. -Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - -+ ``setup.py`` --- building script -+ ``__init__.py`` --- package initializer -+ ``tests/`` --- directory of unittests - -Their contents are described below. - -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. 
- -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: - - #!/usr/bin/env python3 - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specify the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. - -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. - -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. 
- -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. - -``Configuration`` instance methods ----------------------------------- - -+ ``config.todict()`` --- returns configuration dictionary suitable for - passing to ``numpy.distutils.core.setup(..)`` function. - -+ ``config.paths(*paths) --- applies ``glob.glob(..)`` to items of - ``paths`` if necessary. Fixes ``paths`` item that is relative to - ``config.local_path``. - -+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` --- - returns a list of subpackage configurations. Subpackage is looked in the - current directory under the name ``subpackage_name`` but the path - can be specified also via optional ``subpackage_path`` argument. - If ``subpackage_name`` is specified as ``None`` then the subpackage - name will be taken the basename of ``subpackage_path``. - Any ``*`` used for subpackage names are expanded as wildcards. - -+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` --- - add SciPy subpackage configuration to the current one. The meaning - and usage of arguments is explained above, see - ``config.get_subpackage()`` method. - -+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files`` - list. If ``files`` item is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data - files. 
By default data files are copied under package installation - directory. For example, - - :: - - config.add_data_files('foo.dat', - ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']), - 'bar/car.dat'. - '/full/path/to/can.dat', - ) - - will install data files to the following locations - - :: - - / - foo.dat - fun/ - gun.dat - pun.dat - sun.dat - bar/ - car.dat - can.dat - - Path to data files can be a function taking no arguments and - returning path(s) to data files -- this is a useful when data files - are generated while building the package. (XXX: explain the step - when this function are called exactly) - -+ ``config.add_data_dir(data_path)`` --- add directory ``data_path`` - recursively to ``data_files``. The whole directory tree starting at - ``data_path`` will be copied under package installation directory. - If ``data_path`` is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data directory. - By default, data directory are copied under package installation - directory under the basename of ``data_path``. For example, - - :: - - config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat - config.add_data_dir(('sun','fun')) - config.add_data_dir(('gun','/full/path/to/fun')) - - will install data files to the following locations - - :: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - bar/ - car.dat - -+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to - ``include_dirs`` list. This list will be visible to all extension - modules of the current package. - -+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under - ``/include/pythonX.X//`` - directory. If ``files`` item is a tuple then it's first argument - specifies the installation suffix relative to - ``/include/pythonX.X/`` path. 
This is a Python distutils - method; its use is discouraged for NumPy and SciPy in favour of - ``config.add_data_files(*files)``. - -+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts`` - list. Scripts will be installed under ``/bin/`` directory. - -+ ``config.add_extension(name,sources,**kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument - ``name`` defines the name of the extension module that will be - installed under ``config.name`` package. The second argument is - a list of sources. ``add_extension`` method takes also keyword - arguments that are passed on to the ``Extension`` constructor. - The list of allowed keywords is the following: ``include_dirs``, - ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``, - ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``, - ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``, - ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``, - ``extra_f77_compile_args``, ``extra_f90_compile_args``. - - Note that ``config.paths`` method is applied to all lists that - may contain paths. ``extra_info`` is a dictionary or a list - of dictionaries that content will be appended to keyword arguments. - The list ``depends`` contains paths to files or directories - that the sources of the extension module depend on. If any path - in the ``depends`` list is newer than the extension module, then - the module will be rebuilt. - - The list of sources may contain functions ('source generators') - with a pattern ``def (ext, build_dir): return - ``. If ``funcname`` returns ``None``, no sources - are generated. And if the ``Extension`` instance has no sources - after processing all source generators, no extension module will - be built. This is the recommended way to conditionally define - extension modules. Source generator functions are called by the - ``build_src`` sub-command of ``numpy.distutils``. 
- - For example, here is a typical source generator function:: - - def generate_source(ext,build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'somesource.c') - if newer(target,__file__): - # create target file - return target - - The first argument contains the Extension instance that can be - useful to access its attributes like ``depends``, ``sources``, - etc. lists and modify them during the building process. - The second argument gives a path to a build directory that must - be used when creating files to a disk. - -+ ``config.add_library(name, sources, **build_info)`` --- add a - library to ``libraries`` list. Allowed keywords arguments are - ``depends``, ``macros``, ``include_dirs``, ``extra_compiler_args``, - ``f2py_options``, ``extra_f77_compile_args``, - ``extra_f90_compile_args``. See ``.add_extension()`` method for - more information on arguments. - -+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled successfully). - -+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled successfully). - -+ ``config.get_version()`` --- return version string of the current package, - ``None`` if version information could not be detected. This methods - scans files ``__version__.py``, ``_version.py``, - ``version.py``, ``__svn_version__.py`` for string variables - ``version``, ``__version__``, ``_version``. - -+ ``config.make_svn_version_py()`` --- appends a data function to - ``data_files`` list that will generate ``__svn_version__.py`` file - to the current package directory. The file will be removed from - the source directory when Python exits. - -+ ``config.get_build_temp_dir()`` --- return a path to a temporary - directory. This is the place where one should build temporary - files. - -+ ``config.get_distribution()`` --- return distutils ``Distribution`` - instance. 
- -+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config - command instance. - -+ ``config.get_info(*names)`` --- - - -.. _templating: - -Conversion of ``.src`` files using templates --------------------------------------------- - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. - -.. index:: - single: code generation - -Fortran files -------------- - -This template converter will replicate all **function** and -**subroutine** blocks in the file with names that contain '<...>' -according to the rules in '<...>'. The number of comma-separated words -in '<...>' determines the number of times the block is repeated. What -these words are indicates what that repeat rule, '<...>', should be -replaced with in each block. All of the repeat rules in a block must -contain the same number of comma-separated words indicating the number -of times that block should be repeated. If the word in the repeat rule -needs a comma, leftarrow, or rightarrow, then prepend it with a -backslash ' \'. If a word in the repeat rule matches ' \\' then -it will be replaced with the -th word in the same repeat -specification. There are two forms for the repeat rule: named and -short. - -Named repeat rule -^^^^^^^^^^^^^^^^^ - -A named repeat rule is useful when the same set of repeats must be -used several times in a block. It is specified using , where N is the number of times the block -should be repeated. 
On each repeat of the block, the entire -expression, '<...>' will be replaced first with item1, and then with -item2, and so forth until N repeats are accomplished. Once a named -repeat specification has been introduced, the same repeat rule may be -used **in the current block** by referring only to the name -(i.e. ). - - -Short repeat rule -^^^^^^^^^^^^^^^^^ - -A short repeat rule looks like . The -rule specifies that the entire expression, '<...>' should be replaced -first with item1, and then with item2, and so forth until N repeats -are accomplished. - - -Pre-defined names -^^^^^^^^^^^^^^^^^ - -The following predefined named repeat rules are available: - -- - -- <_c=s,d,c,z> - -- <_t=real, double precision, complex, double complex> - -- - -- - -- - -- - - -Other files ------------- - -Non-Fortran files use a separate syntax for defining template blocks -that should be repeated using a variable expansion similar to the -named repeat rules of the Fortran-specific repeats. - -NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written -in a custom templating language to generate C code. The ``@`` symbol is -used to wrap macro-style variables to empower a string substitution mechanism -that might describe (for instance) a set of data types. - -The template language blocks are delimited by ``/**begin repeat`` -and ``/**end repeat**/`` lines, which may also be nested using -consecutively numbered delimiting lines such as ``/**begin repeat1`` -and ``/**end repeat1**/``: - -1. ``/**begin repeat`` on a line by itself marks the beginning of - a segment that should be repeated. - -2. Named variable expansions are defined using ``#name=item1, item2, item3, - ..., itemN#`` and placed on successive lines. These variables are - replaced in each repeat block with corresponding word. All named - variables in the same repeat block must define the same number of - words. - -3. 
In specifying the repeat rule for a named variable, ``item*N`` is short- - hand for ``item, item, ..., item`` repeated N times. In addition, - parenthesis in combination with ``*N`` can be used for grouping several - items that should be repeated. Thus, ``#name=(item1, item2)*4#`` is - equivalent to ``#name=item1, item2, item1, item2, item1, item2, item1, - item2#``. - -4. ``*/`` on a line by itself marks the end of the variable expansion - naming. The next line is the first line that will be repeated using - the named rules. - -5. Inside the block to be repeated, the variables that should be expanded - are specified as ``@name@``. - -6. ``/**end repeat**/`` on a line by itself marks the previous line - as the last line of the block to be repeated. - -7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted - for string substitution, which is preprocessed to a number of otherwise - identical loops with several strings such as ``INT``, ``LONG``, ``UINT``, - ``ULONG``. The ``@TYPE@`` style syntax thus reduces code duplication and - maintenance burden by mimicking languages that have generic type support. - -The above rules may be clearer in the following template source example: - -.. 
code-block:: NumPyC - :linenos: - :emphasize-lines: 3, 13, 29, 31 - - /* TIMEDELTA to non-float types */ - - /**begin repeat - * - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, DATETIME, - * TIMEDELTA# - * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_datetime, npy_timedelta# - */ - - /**begin repeat1 - * - * #FROMTYPE = TIMEDELTA# - * #fromtype = npy_timedelta# - */ - static void - @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n, - void *NPY_UNUSED(aip), void *NPY_UNUSED(aop)) - { - const @fromtype@ *ip = input; - @totype@ *op = output; - - while (n--) { - *op++ = (@totype@)*ip++; - } - } - /**end repeat1**/ - - /**end repeat**/ - -The preprocessing of generically-typed C source files (whether in NumPy -proper or in any third party package using NumPy Distutils) is performed -by `conv_template.py`_. -The type-specific C files generated (extension: ``.c``) -by these modules during the build process are ready to be compiled. This -form of generic typing is also supported for C header files (preprocessed -to produce ``.h`` files). - -.. _conv_template.py: https://github.com/numpy/numpy/blob/main/numpy/distutils/conv_template.py - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. 
- -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. - -+ ``njoin(*path)`` --- join pathname components + convert ``/``-separated path - to ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- returns ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return frame object from call stack with given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return path of a module - relative to parent_path when given. Handles also ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. 
- -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``__init__.py`` file -'''''''''''''''''''''''' - -The header of a typical SciPy ``__init__.py`` is:: - - """ - Package docstring, typically with a brief description and function listing. - """ - - # import functions into module namespace - from .subpackage import * - ... - - __all__ = [s for s in dir() if not s.startswith('_')] - - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifying config_fc options for libraries in setup.py script -------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using:: - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags. - -It's recommended to specify only those config_fc options in such a way -that are compiler independent. - -Getting extra Fortran 77 compiler options from source ------------------------------------------------------ - -Some old Fortran codes need special compiler options in order to -work correctly. 
In order to specify compiler options per source -file, ``numpy.distutils`` Fortran compiler looks for the following -pattern:: - - CF77FLAGS() = - -in the first 20 lines of the source and use the ``f77flags`` for -specified type of the fcompiler (the first character ``C`` is optional). - -TODO: This feature can be easily extended for Fortran 90 codes as -well. Let us know if you would need such a feature. diff --git a/doc/Makefile b/doc/Makefile index 545b10de3384..e6e0689481ca 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -39,7 +39,7 @@ help: @echo " clean to remove generated doc files and start fresh" @echo " docenv make a virtual environment in which to build docs" @echo " html to make standalone HTML files" - @echo " htmlhelp to make HTML files and a HTML help project" + @echo " htmlhelp to make HTML files and an HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c8c1f129c0b2..2abc89fcd5aa 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -17,8 +17,6 @@ documentation. There are a few ways to streamline things: - Git can be set up to use a keyring to store your GitHub personal access token. Search online for the details. -- You can use the ``keyring`` app to store the PyPI password for twine. See the - online twine documentation for details. 
Prior to release @@ -97,7 +95,9 @@ make sure that the release notes have an entry in the ``release.rst`` file:: Generate the changelog ---------------------- -The changelog is generated using the changelog tool:: +The changelog is generated using the changelog tool (``spin changelog``), +which collects merged pull requests and formats them into a release-ready +changelog:: $ spin changelog $GITHUB v2.3.0..maintenance/2.4.x > doc/changelog/2.4.0-changelog.rst @@ -120,7 +120,7 @@ run ``spin notes``, which will incorporate the snippets into the $ spin notes $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.4.0-notes.rst -Once the ``notes-towncrier`` contents has been incorporated into release note +Once the ``notes-towncrier`` contents have been incorporated into release notes the ``.. include:: notes-towncrier.rst`` directive can be removed. The notes will always need some fixups, the introduction will need to be written, and significant changes should be called out. For patch releases the changelog text @@ -134,7 +134,7 @@ Test the wheel builds After the release PR is merged, go to the ``numpy-release`` repository in your browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button in ``actions``. Make sure that the upload -target is ``none`` in the *evironment* dropdown. The wheels take about 1 hour +target is ``none`` in the *environment* dropdown. The wheels take about 1 hour to build, but sometimes GitHub is very slow. If some wheel builds fail for unrelated reasons, you can re-run them as normal in the GitHub Actions UI with ``re-run failed``. After the wheels are built review the results, checking that @@ -185,7 +185,7 @@ If you need to delete the tag due to error:: Go to the ``numpy-release`` repository in your browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button in ``actions``. 
Make sure that the upload target is ``pypi`` in the -*evironment* dropdown. the wheels take about 1 hour to build, but sometimes +*environment* dropdown. The wheels take about 1 hour to build, but sometimes GitHub is very slow. If some wheel builds fail for unrelated reasons, you can re-run them as normal in the GitHub Actions UI with ``re-run failed``. After the wheels are built review the results, checking that the number of artifacts @@ -312,10 +312,12 @@ This assumes that you have forked ``_:: - For all releases, go to the bottom of the page and add a one line link. Look to the previous links for example. -- For the ``*.0`` release in a cycle, add a new section at the top with a short - description of the new features and point the news link to it. -- Edit the newsHeader and date fields at the top of news.md -- Also edit the buttonText on line 14 in content/en/config.yaml +- For the ``*.0`` release in a cycle: + + - Add a new section at the top with a short description of the new + features and point the news link to it. + - Edit the newsHeader and date fields at the top of news.md + - Also edit the buttonText on line 14 in content/en/config.yaml commit and push:: diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 803625e727ae..f0cd063687fd 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -63,8 +63,10 @@ example, the ``_core`` module, use the following:: Running tests from the command line ----------------------------------- -If you want to build NumPy in order to work on NumPy itself, use the ``spin`` -utility. To run NumPy's full test suite:: +If you want to build NumPy in order to work on NumPy itself, use the +:ref:`spin utility `. 
+ +To run NumPy's full test suite:: $ spin test -m full diff --git a/doc/changelog/2.4.0-changelog.rst b/doc/changelog/2.4.0-changelog.rst new file mode 100644 index 000000000000..472811f6be62 --- /dev/null +++ b/doc/changelog/2.4.0-changelog.rst @@ -0,0 +1,828 @@ + +Contributors +============ + +A total of 142 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !MyUserNameWasTakenLinux + +* !ianlv + +* !kostayScr + +* !olivier + +* Aadya Chinubhai + +* Aaron Kollasch + +* Abdu Zoghbi + +* Abhishek Kumar +* Abhishek Tiwari + +* Adam Turner + +* Akhil Kannan + +* Aleksandr A. Voyt + +* Amelia Thurdekoos + +* Andrew Nelson +* Angus Gibson + +* Anik Chand + +* Aniket Singh Yadav + +* Ankit Ahlawat + +* Arthur Lacote + +* Ben Woodruff +* Bernard Roesler + +* Brad Smith + +* Britney Whittington + +* Carlos Martin +* Charles Harris +* Charlie Lin + +* Chris Navarro +* Christian Barbia + +* Christian Bourjau + +* Christine P. 
Chai +* Christopher Sidebottom +* ClÊment Robert +* Copilot + +* Dan Raviv + +* Daniel Bertalan + +* David Seifert + +* Dennis Van de Vorst + +* Developer-Ecosystem-Engineering +* Diego Atencia + +* Dillon Niederhut +* Dimitri Papadopoulos Orfanos +* Diya Singh + +* Evgeni Burovski +* Faizan-Ul Huda + +* François Rozet +* GermÃĄn Godoy Gutierrez + +* Gubaydullin Danis + +* Guido Imperiale +* Hamza Meel + +* Hannah Aizenman +* Henry Schreiner +* Hunter Hogan + +* Iason Krommydas + +* Inessa Pawson +* Jake VanderPlas +* Jingu Kang + +* Joe Rickerby + +* Johnnie Gray + +* Jonathan Reimer + +* Joren Hammudoglu +* Kaiyuan Yang + +* Kelvin Li + +* Khelf Mohamed + +* Koki Watanabe + +* Kumar Aditya + +* Leonardo Paredes + +* Lucas Colley +* Lysandros Nikolaou +* Maanas Arora +* Marc Redemske + +* Marco Barbosa +* Marco Edward Gorelli +* Mark Ryan +* Marten van Kerkwijk +* Maryanne Wachter +* Mateusz SokÃŗÅ‚ +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Michael Davidsaver +* Michael Siebert +* Michał GÃŗrny +* Mohammed Abdul Rahman +* Mohammed Zuhaib + +* Mohit Deoli + +* Moritz Groß + +* Mugundan Selvanayagam +* Muhammad Maaz + +* Mukulika Pahari +* Nathan Goldbaum +* Nicholas Bidler + +* Paresh Joshi + +* Parsa Shemirani + +* Paul Caprioli + +* Phoenix Studio + +* Pieter Eendebak +* Rafael Laboissière + +* Raghuveer Devulapalli +* Ralf BÃŧrkle + +* Ralf Gommers +* Richard Smythe + +* Riku Sakamoto + +* Rohit Goswami +* Ross Barnowski +* Rupesh Sharma + +* Sachin Shah + +* Samruddhi Baviskar + +* Sandeep Gupta + +* Sandro + +* Sanjay Kumar Sakamuri Kamalakar + +* Sarang Joshi + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Shirong Wang + +* Shyok Mutsuddi + +* Simola Nayak + +* Stan Ulbrych +* Steven Hur + +* Swayam Singh + +* T.Yamada + +* Tim Hoffmann +* Timileyin Daso + +* Tobias Markus + +* Tontonio3 + +* Toshaksha + +* Trey Cole + +* Tyler Reddy +* Varad Raj Singh + +* Veit Heller + +* Vineet Kumar + +* Wang Yang (杨æ—ē) +* Warren 
Weckesser +* William Pursell + +* Xiaoyu + +* Yasir Ashfaq + +* Yuki K +* Yuvraj Pradhan +* Zebreus + +* Zhi Li + +* dependabot[bot] + +Pull requests merged +==================== + +A total of 673 pull requests were merged for this release. + +* `#23513 `__: ENH: speed up einsum with optimize using batched matmul +* `#24501 `__: DOC: add description of dtype b1 in arrays.dtypes.rst +* `#25245 `__: ENH: Enable native half-precision scalar conversion operations... +* `#28147 `__: DOC: Fix ambiguity in polyfit description +* `#28158 `__: DOC: Update CONTRIBUTING.rst +* `#28590 `__: ENH: Use array indexing preparation routines for flatiter objects +* `#28595 `__: BUG: quantile should error when weights are all zeros +* `#28622 `__: ENH, SIMD: Initial implementation of Highway wrapper +* `#28767 `__: ENH: np.unique: support hash based unique for string dtype +* `#28826 `__: DOC: Add flat examples to argmax and argmin +* `#28896 `__: ENH: Modulate dispatched x86 CPU features +* `#28925 `__: DEP: Deprecate setting the strides attribute of a numpy array +* `#28955 `__: MNT: Update windows-2019 to windows-2022[wheel build] +* `#28970 `__: MNT: Enforce ruff/Perflint rules (PERF) +* `#28979 `__: DOC: improves np.fromfile file description (#28840) +* `#28983 `__: MAINT: Options to catch more issues reported by pytest +* `#28985 `__: MNT: constant string arrays instead of pointers in C +* `#28996 `__: ENH: add __array_function__ protocol in polynomial +* `#29007 `__: CI: update cibuildwheel to 3.0.0b1 and enable cp314 and cp314t... +* `#29012 `__: TYP: Type ``MaskedArray.__{add,radd,sub,rsub}__`` +* `#29019 `__: BEG, MAINT: Begin NumPy 2.4.0 development. 
+* `#29022 `__: MAINT: Convert multiarray to multi-phase init (PEP 489) +* `#29028 `__: MAINT: Convert pocketfft_umath to multi-phase init (PEP 489) +* `#29032 `__: BUG: Fix workflow bug +* `#29034 `__: BUG: Avoid compile errors in f2py modules +* `#29036 `__: DOC: Expand/clean up extension module import error +* `#29039 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 +* `#29040 `__: BUG: Fix f2py derived types in modules +* `#29041 `__: BUG: Fix cache use regression +* `#29048 `__: TYP: annotate ``strings.slice`` +* `#29050 `__: TYP: remove expired ``tostring`` methods +* `#29051 `__: MNT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29052 `__: ENH: show warning when np.maximum receives more than 2 inputs +* `#29053 `__: BLD: allow targeting webassembly without emscripten +* `#29057 `__: TYP: fix invalid overload definition in ``_core.defchararray.add`` +* `#29058 `__: TYP: fill in some of the missing annotations in the stubs +* `#29060 `__: BUG: add bounds-checking to in-place string multiply +* `#29061 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29066 `__: DOC: fix typo in documentation of vecmat +* `#29068 `__: MAINT: Enforce ruff E501 +* `#29078 `__: CI: clean up cibuildwheel config a bit +* `#29080 `__: CI: bump to cibuildwheel 3.0.0b4 +* `#29083 `__: MAINT: Avoid use of deprecated _PyDict_GetItemStringWithError... 
+* `#29084 `__: BENCH: Increase array sizes for ufunc and sort benchmarks +* `#29085 `__: MAINT: Bump ``scipy-doctest`` to 1.8 +* `#29088 `__: MAINT: Add ``build-\*`` directories to ``.gitignore`` +* `#29091 `__: BUG: f2py: thread-safe forcomb +* `#29092 `__: TYP: fix ``NDArray[integer]`` inplace operator mypy issue +* `#29093 `__: MAINT: Bump ossf/scorecard-action from 2.4.1 to 2.4.2 +* `#29094 `__: BUG: remove ``NPY_ALIGNMENT_REQUIRED`` +* `#29095 `__: MAINT: bump ``mypy`` to ``1.16.0`` +* `#29097 `__: TYP: run mypy in strict mode +* `#29098 `__: PERF: Make NpzFile member existence checks constant-time +* `#29105 `__: BUG: Allow np.percentile to operate on float16 data +* `#29106 `__: DOC: Fix some incorrect reST markups +* `#29111 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29112 `__: ENH: Improve error message in numpy.testing.assert_array_compare +* `#29115 `__: MAINT: cleanup from finalized concatenate deprecation +* `#29119 `__: DOC: remove very outdated info on ATLAS +* `#29120 `__: TYP: minor ufunc alias fixes in ``__init__.pyi`` +* `#29121 `__: MAINT: Bump github/codeql-action from 3.28.18 to 3.28.19 +* `#29122 `__: DOC: fix typo in Numpy's module structure +* `#29128 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.1 to 3.2.0 +* `#29129 `__: ENH: add a casting option 'same_value' and use it in np.astype +* `#29133 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.4 to 2.0.5 +* `#29137 `__: BUG: make round consistently return a copy +* `#29141 `__: MAINT: Update main after 2.3.0 release. +* `#29142 `__: TYP: update typing stubs for ``_pyinstaller/hook-numpy.py`` +* `#29143 `__: DOC: Document assertion comparison behavior between scalar and... 
+* `#29145 `__: TYP: add missing ``numpy.lib`` exports +* `#29146 `__: TYP: fix minor ``f2py`` stub inconsistencies +* `#29147 `__: BUG: Missing array-api ``capabilities()`` key +* `#29148 `__: TST: migrating from pytz to zoneinfo + tzdata (where needed) +* `#29149 `__: CI: Bump ``array-api-tests`` to ``v2025.05.23`` +* `#29154 `__: DOC: Remove version switcher colors +* `#29155 `__: TYP: ``double = float64`` and ``cdouble = complex128`` +* `#29156 `__: CI: Run mypy with Python 3.13 +* `#29158 `__: DOC: tweak release walkthrough for numpy.org news blurb +* `#29160 `__: DOC: Suppress distutils doc build warnings for python 3.12+ +* `#29165 `__: ENH: Use itertools.product for ndindex to improve performance +* `#29166 `__: TYP: Fix missing ``_core.numeric`` (re-)exports +* `#29168 `__: TYP: Simplified ``dtype.__new__`` overloads +* `#29169 `__: TYP: ``out=...`` in ufuncs +* `#29170 `__: TYP: ``numpy._NoValue`` +* `#29171 `__: TYP: Accept dispatcher function with optional returns in ``_core.overrides`` +* `#29175 `__: TYP: Fix invalid inline annotations in ``lib._function_base_impl`` +* `#29176 `__: TYP: ``any(None)`` and ``all(None)`` +* `#29177 `__: TYP: ``lib._iotools`` annotation improvements +* `#29179 `__: BUG: fix matmul with transposed out arg +* `#29180 `__: MAINT: Bump pypa/cibuildwheel from 3.0.0b4 to 3.0.0 +* `#29181 `__: TYP: fix and improve ``numpy.lib._utils_impl`` +* `#29183 `__: STY: ruff/isort config tweaks +* `#29184 `__: TYP: fix ``ravel_multi_index`` false rejections +* `#29185 `__: STY: ruff/isort config tweaks - episode 2 +* `#29186 `__: MAINT: bump ``ruff`` to ``0.11.13`` +* `#29187 `__: TYP: add ``__all__`` in ``numpy._core.__init__`` +* `#29188 `__: MAINT: strides comparison performance fix, compare discussion... +* `#29195 `__: MAINT: Bump github/codeql-action from 3.28.19 to 3.29.0 +* `#29196 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior +* `#29197 `__: TST: additional tests for matmul with non-contiguous input and... 
+* `#29204 `__: TYP: fix ``ndarray.__array__`` annotation for ``copy`` +* `#29208 `__: ENH: improve Timsort with powersort merge-policy +* `#29210 `__: BUG: fix linting +* `#29212 `__: CI: Add native ``ppc64le`` CI job using GitHub Actions +* `#29215 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29216 `__: MAINT: Fix some undef warnings +* `#29218 `__: TYP: Workaround for a mypy issue in ``ndarray.__iter__`` +* `#29219 `__: MAINT: bump ``mypy`` to ``1.16.1`` +* `#29220 `__: MAINT: bump ``ruff`` to ``0.12.0`` +* `#29221 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29223 `__: BUG: Address interaction between SME and FPSR +* `#29224 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. +* `#29227 `__: TYP: Support iteration of ``StringDType`` arrays +* `#29230 `__: BUG: avoid negating unsigned integers in resize implementation +* `#29231 `__: BUG: Enforce integer limitation in concatenate +* `#29232 `__: TST: Fix test that uses uninitialized memory +* `#29240 `__: ENH: Let numpy.size accept multiple axes. +* `#29248 `__: TYP: Work around a mypy issue with bool arrays +* `#29250 `__: MAINT: Enable linting with ruff E501 +* `#29252 `__: DOC: Fix some markup errors +* `#29254 `__: DOC: Clarify dtype argument for __array__ in custom container... +* `#29257 `__: MAINT: Update main after 2.3.1 release. +* `#29265 `__: TYP: Type ``MaskedArray.__{mul,rmul}__`` +* `#29269 `__: BUG: fix fencepost error in StringDType internals +* `#29271 `__: TYP: Add overloads for ``MaskedArray.__{div,rdiv,floordiv,rfloordiv}__`` +* `#29272 `__: MAINT: Fix ``I001`` ruff error on main +* `#29273 `__: ENH: Extend numpy.pad to handle pad_width dictionary argument. +* `#29275 `__: DOC: avoid searching some directories for doxygen-commented source... +* `#29277 `__: TYP: Add type annotations for ``MaskedArray.__{pow,rpow}__`` +* `#29278 `__: TYP: fix overloads where ``out: _ArrayT`` was typed as being... 
+* `#29281 `__: BUG: Include python-including headers first +* `#29285 `__: MAINT: Bump github/codeql-action from 3.29.0 to 3.29.1 +* `#29286 `__: BUG: handle case in mapiter where descriptors might get replaced +* `#29289 `__: BUG: Fix macro redefinition +* `#29290 `__: BUG: Fix version check in blas_utils.c +* `#29291 `__: MAINT: Enable linting with ruff E501 +* `#29296 `__: MAINT: Bump github/codeql-action from 3.29.1 to 3.29.2 +* `#29300 `__: MAINT: Enable linting with ruff E501 +* `#29301 `__: DEP: Give a visible warning when ``align=`` to dtype is a non-bool +* `#29302 `__: DOCS: Remove incorrect "Returns" section from ``MaskedArray.sort`` +* `#29303 `__: TYP: Add shape typing to return values of ``np.nonzero`` and... +* `#29305 `__: TYP: add explicit types for np.quantile +* `#29306 `__: DOC: remove redundant words +* `#29307 `__: TYP: Type ``MaskedArray.{trace,round,cumsum,cumprod}`` +* `#29308 `__: Fix incorrect grammar in TypeError message for ufunc argument... +* `#29309 `__: TYP: Type ``MaskedArray.dot`` and ``MaskedArray.anom`` +* `#29310 `__: TYP: rename ``_T`` to ``_ScalarT`` in ``matlib.pyi`` for consistency +* `#29311 `__: DOCS: Fix rendering of ``MaskedArray.anom`` ``dtype`` +* `#29312 `__: BLD: remove unused github workflow +* `#29313 `__: BUG: Allow reading non-npy files in npz and add test +* `#29314 `__: MAINT: Replace setting of array shape by reshape operation +* `#29316 `__: MAINT: remove out-of-date comment +* `#29318 `__: BUG: Fix np.testing utils failing for masked scalar vs. scalar... +* `#29320 `__: DOC: Fix spelling +* `#29321 `__: MNT: Cleanup infs handling in np.testing assertion utilities +* `#29322 `__: MAINT: remove internal uses of assert_warns and suppress_warnings +* `#29325 `__: DOC: Clarify assert_allclose differences vs. allclose +* `#29327 `__: MAINT: Rename nep-0049.rst. +* `#29329 `__: BLD: update ``highway`` submodule to latest master +* `#29331 `__: TYP: ``svd`` overload incorrectly noted ``Literal[False]`` to... 
+* `#29332 `__: TYP: Allow passing ``dtype=None`` to ``trace`` +* `#29333 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) +* `#29334 `__: MAINT: Bump pypa/cibuildwheel from 3.0.0 to 3.0.1 +* `#29335 `__: DOC: vectorize with signature doesn't pre-call function +* `#29338 `__: API,BUG: Fix scalar handling in array-interface allowing NULL... +* `#29340 `__: TYP: correct default value of ``unicode`` in ``chararray.__new__``... +* `#29341 `__: TST: Avoid uninitialized values in test +* `#29343 `__: DOC: Add missing ``self`` in ``__array_ufunc__`` signature +* `#29347 `__: DOC: Fix NEP 49 Resolution Link Formatting (part of #29328) +* `#29351 `__: BLD: print long double format used +* `#29356 `__: BUG: fix test_npy_uintp_type_enum +* `#29358 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29362 `__: DOC: specify that ``numpy.nan_to_num`` supports array like arguments +* `#29364 `__: TST: refactor typing check for @ +* `#29368 `__: BUG: avoid segmentation fault in ``string_expandtabs_length_promoter`` +* `#29369 `__: BUG: fix casting issue in center, ljust, rjust, and zfill +* `#29370 `__: ENH: Allow subscript access for ``np.bool`` by adding ``__class_getitem__`` +* `#29371 `__: MNT: add linter for thread-unsafe C API uses +* `#29372 `__: BUG: Fix np.unique with axis=0 and 1D input not collapsing NaNs... +* `#29374 `__: DEV: remove "packages" from ``.gitignore`` +* `#29375 `__: STY: Fix typo in npy_cpu_dispatch.c +* `#29377 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29380 `__: BUG: Fix repeatability issues in test suite +* `#29381 `__: TYP: Type ``MaskedArray.{sum,std,var,mean,prod}`` +* `#29383 `__: TYP: Type ``MaskedArray.view`` +* `#29385 `__: BLD: Add sw_64 support +* `#29386 `__: DOC: Fix ``PyArrayMapIterObject`` document +* `#29387 `__: DOC: document ``mean`` parameter in ``ndarray.std`` and ``ndarray.var``... 
+* `#29390 `__: DOC: better differentiate arrays in dstack docstring +* `#29392 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` +* `#29394 `__: ENH: avoid thread safety issues around uses of ``PySequence_Fast`` +* `#29396 `__: ENH: Show unit information in repr for datetime64("NaT") +* `#29401 `__: TYP: Type ``MaskedArray.resize``\ , wrap ``NoReturn`` tests in... +* `#29402 `__: DOC: Correct more ndarray defaults +* `#29403 `__: MAINT: remove unnecessary ``kwargs`` update in ``MaskedArray.reshape`` +* `#29404 `__: TYP: Type ``MaskedArray.reshape`` +* `#29405 `__: MAINT/BUG: Followups for PySequence_Fast locking +* `#29406 `__: MAINT: Bump github/codeql-action from 3.29.2 to 3.29.3 +* `#29407 `__: MAINT: use a stable pypy release in CI +* `#29411 `__: BUG: fix datetime/timedelta hash memory leak +* `#29418 `__: TYP: Type ``MaskedArray.__deepcopy__`` and ``MaskedArray.argsort`` +* `#29419 `__: DOC: Fix index name in notes for np.take +* `#29423 `__: BUG: allow ``MaskedArray.fill_value`` be a string when ``dtype=StringDType`` +* `#29426 `__: MAINT: Bump github/codeql-action from 3.29.3 to 3.29.4 +* `#29427 `__: DOC: Remove outdated ``numpy.exceptions`` compatibility note. +* `#29428 `__: TYP: Add test which hits ``np.array`` constructor overload with... +* `#29431 `__: ENH: Enable RVV acceleration for auto-vectorization in RISC-V +* `#29432 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29433 `__: MAINT: Bump pypa/cibuildwheel from 3.0.1 to 3.1.0 +* `#29435 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29436 `__: BLD: allow targeting webassembly without emscripten +* `#29441 `__: MAINT: Update main after 2.3.2 release. +* `#29444 `__: MAINT: Add Python 3.14 to classifier. +* `#29445 `__: DOC: Update RELEASE_WALKTHROUGH.rst +* `#29450 `__: DOC: Clarify that ``numpy.printoptions`` applies only to ``ndarray``\... 
+* `#29456 `__: MAINT: Replace pavement.py +* `#29457 `__: TYP: Type ``MaskedArray.__new__`` +* `#29459 `__: BLD: provide explicit control over cpu-baseline detection +* `#29466 `__: TYP: Type ``MaskedArray.flat`` +* `#29467 `__: TYP: Type ``MaskedArray.recordmask`` +* `#29468 `__: TYP: Type ``MaskedArray.fill_value`` +* `#29470 `__: TYP: Remove ``MaskedArray.__reduce__``\ , and punt on ``MaskedArray.__{eq,ne}__```... +* `#29471 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29472 `__: TYP: Type ``MaskedArray.__getitem__`` +* `#29478 `__: TYP: Type ``MaskedArray.__setitem__`` +* `#29479 `__: MAINT: Do not exclude ``typing/tests/data`` from ruff +* `#29480 `__: TYP: Type ``MaskedArray.compress`` +* `#29481 `__: MAINT: Add .file entry to all .s SVML files +* `#29483 `__: TYP: Type ``MaskedArray.__array_finalize__`` and ``MaskedArray.__array_wrap__`` +* `#29484 `__: MAINT: Bump github/codeql-action from 3.29.4 to 3.29.5 +* `#29487 `__: CI: Add UBSAN CI jobs for macOS arm64 and Linux x86-64 +* `#29489 `__: DOC: Add 'See Also' refs for sign, copysign and signbit. +* `#29493 `__: MAINT: bump ``mypy`` to ``1.17.1`` +* `#29495 `__: MAINT: Bump pypa/cibuildwheel from 3.1.2 to 3.1.3 +* `#29502 `__: DOC: Add narrative documentation for printing NumPy arrays +* `#29504 `__: ENH: Use extern C in arraytypes.h.src file for cpp files +* `#29505 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29509 `__: BLD: update vendored Meson to 1.8.3 +* `#29512 `__: TST: don't explicitly specify -j in TSAN build +* `#29513 `__: MAINT: Bump hypothesis to 6.137.1 +* `#29514 `__: DOC: Add 'today' string to datetime64 documentation +* `#29521 `__: MAINT: Bump actions/download-artifact from 4.3.0 to 5.0.0 +* `#29522 `__: BUG: random: Fix handling of very small p in Generator.binomial. 
+* `#29526 `__: TYP: Type ``MaskedIterator`` +* `#29529 `__: MAINT: Bump actions/cache from 4.2.3 to 4.2.4 +* `#29530 `__: MAINT: Bump github/codeql-action from 3.29.5 to 3.29.6 +* `#29531 `__: TYP: Type default values in stubs in ``numpy/ma`` +* `#29532 `__: DOC: Clarify build compatibility on the dev dependencies page +* `#29533 `__: MAINT: Bump github/codeql-action from 3.29.6 to 3.29.8 +* `#29534 `__: DOC: Add 'now' string to datetime64 documentation +* `#29535 `__: BLD: update licensing metadata to use PEP 639 +* `#29537 `__: ENH: np.unique: support hash based unique for complex dtype +* `#29539 `__: BUG: left bit shift undefined behavior +* `#29540 `__: CI: run some wheel build jobs by default, and clean up the rest +* `#29541 `__: STY: fix typo in dtypemeta.c [skip azp][skip actions] +* `#29542 `__: MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 +* `#29546 `__: fix: File exists error on macOS when running spin lint +* `#29548 `__: MAINT: Use double quotes (ruff rule ``Q``\ ) (only on ``.pyi``... +* `#29550 `__: DEP: Deprecate NumPy warning control utilities +* `#29551 `__: BUG: resolve invalid grep with env neutral script +* `#29553 `__: MAINT: Bump github/codeql-action from 3.29.8 to 3.29.9 +* `#29555 `__: BUG: Fix metadata not roundtripping when pickling datetime +* `#29557 `__: DOC: Document datetime and timedelta to python's object type... +* `#29564 `__: TYP: use ``TypeAliasType`` for ``ArrayLike`` and ``DTypeLike``... 
+* `#29565 `__: STY: ruff rule name comments +* `#29569 `__: ENH: Add ndmax parameter to np.array to control recursion depth +* `#29572 `__: ENH: enable processing object file for f2py meson backend +* `#29574 `__: TYP: add ``ndmax`` parameter to ``np.array`` +* `#29579 `__: BLD: wire up ``ASIMDDP`` feature to ``ARM_FEATURES`` +* `#29582 `__: DOC: Add link to homepage in doc landing page +* `#29585 `__: TST: update link and version for Intel SDE download +* `#29586 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29588 `__: DOC: Make the image credit author link clickable +* `#29589 `__: MAINT: Bump actions/dependency-review-action from 4.7.1 to 4.7.2 +* `#29590 `__: MAINT: Bump github/codeql-action from 3.29.9 to 3.29.10 +* `#29594 `__: TYP: Add defaults to ``numpy/core`` and ``numpy/__init__.py`` +* `#29596 `__: TST: Replace xunit setup with methods +* `#29598 `__: BUG: fix for evaluation of random_f and random_standard_cauchy... +* `#29601 `__: DOC: fix for f2py migrating-to-meson page +* `#29602 `__: MAINT: Bump pypa/cibuildwheel from 3.1.3 to 3.1.4 +* `#29604 `__: DOC: Fix typo in tril_indices and triu_indices docstrings +* `#29605 `__: TST: Replace xunit setup with methods +* `#29607 `__: TST: Enable unit tests for RISC-V CPU dispatcher utilities +* `#29608 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29609 `__: BUG: fix negative samples generated by Wald distribution +* `#29611 `__: CI: more specific mypy_primer ``on:`` paths +* `#29612 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29615 `__: MAINT: Bump github/codeql-action from 3.29.10 to 3.29.11 +* `#29616 `__: TST: Replace xunit setup with methods +* `#29617 `__: DOC: Correct a few formatting issues +* `#29618 `__: MAINT: fix typo in cmds.py +* `#29621 `__: ENH: Extend coverage for benchmark of np.unique +* `#29628 `__: TST: Replace xunit setup with methods +* `#29629 `__: TYP: replace scalar type ``__init__`` with ``__new__`` +* `#29630 `__: TYP: fix slightly incorrect 
``memoryview`` type argument in ``ScalarType`` +* `#29631 `__: TYP: Make ``datetime64`` a generic type at runtime +* `#29633 `__: TYP: add missing ``_NoValue`` annotations in ``_core.fromnumeric`` +* `#29634 `__: TYP: Add missing defaults to stubs +* `#29636 `__: MAINT: Bump actions/dependency-review-action from 4.7.2 to 4.7.3 +* `#29641 `__: TST: Replace xunit setup with methods +* `#29642 `__: ENH: Add extended sorting APIs +* `#29646 `__: DOC: Fix typo in basics.strings.rst +* `#29648 `__: TST: delete global env_setup fixture +* `#29649 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29653 `__: MAINT: Bump github/codeql-action from 3.29.11 to 3.30.0 +* `#29654 `__: MAINT: Add Linux Foundation Health Badge to README +* `#29655 `__: DOC: clarify numpy.asarray, numpy.asanyarray, numpy.asarray_chkfinite... +* `#29656 `__: ENH: Improve performance of numpy scalar __copy__ and __deepcopy__ +* `#29657 `__: TST: Replace xunit setup with methods +* `#29658 `__: MAINT: Optimize the logical implementation for RISC-V based on... 
+* `#29662 `__: BLD: Add missing include +* `#29665 `__: BUG: use correct input dtype in flatiter indexed assignment +* `#29666 `__: TST: Replace xunit setup with methods +* `#29667 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.5 to 2.0.6 +* `#29669 `__: MAINT: Bump actions/github-script from 7.0.1 to 8.0.0 +* `#29670 `__: MAINT: Bump actions/setup-python from 5.6.0 to 6.0.0 +* `#29671 `__: TST: Replace test_smoke xunit setup with methods +* `#29678 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29686 `__: MAINT: Bump github/codeql-action from 3.30.0 to 3.30.1 +* `#29692 `__: TST: Replace test_deprecations setup/teardown with context manager +* `#29693 `__: TST: xfail test_kind::test_quad_precision on AIX/PPC +* `#29695 `__: TYP: fix ``np.bool`` method declarations +* `#29697 `__: BUG: Correct ambiguous logic for ``s390x`` CPU feature detection +* `#29704 `__: BLD: Add missing +* `#29706 `__: TYP: fix ``np.number`` and ``np.\*integer`` method declaration +* `#29713 `__: MAINT: update spin to 0.14 in requirements files +* `#29714 `__: TST: update test_regression::test_gph25784 +* `#29715 `__: BUG: Fix ``dtype`` refcount in ``__array__`` +* `#29716 `__: BUG: standardize 'Mean of empty slice' inconsistent message #29711 +* `#29718 `__: TST: not to include the LONGDOUBLE test on AIX +* `#29723 `__: MAINT: Bump github/codeql-action from 3.30.1 to 3.30.2 +* `#29726 `__: MAINT: Update main after 2.3.3 release. +* `#29729 `__: TST: Fix np.random thread test failures +* `#29730 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29732 `__: DOC: update documentation on how to prepare and do a release +* `#29733 `__: TYP: fix method declarations in ``floating``\ , ``timedelta64``\... 
+* `#29734 `__: TYP: fix ``ndarray.strides`` decorator order +* `#29735 `__: MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 +* `#29736 `__: TYP: sort out some of the ``# type: ignore`` comments in ``__init__.pyi`` +* `#29737 `__: ENH, API: New sorting slots for DType API +* `#29739 `__: TYP: Remove ``None`` from definition of ``DTypeLike`` type alias +* `#29740 `__: TST: disable overflow exception test of numpy.power on AIX +* `#29741 `__: MAINT: Bump github/codeql-action from 3.30.2 to 3.30.3 +* `#29743 `__: MAINT: delete unused variables in unary logical dispatch +* `#29744 `__: TST: Simplify and clarify StringDType testing support utilities +* `#29745 `__: BUG: Fix max_depth validation condition in PyArray_FromAny_int +* `#29749 `__: TYP: mypy 1.18.1 +* `#29750 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29751 `__: ENH: implement powersort merge-policy for argsort +* `#29753 `__: DOC: Fix typo in absolute_beginners.rst +* `#29754 `__: MAINT: pin asv<0.6.5 +* `#29755 `__: DOC: Clarify description of diagonal covariance in multivariate_normal... +* `#29757 `__: DOC: add dev docs on C debuggers and compiler sanitizers +* `#29760 `__: DOC: Improve documentation for f2py and Meson usage, add ufunc... +* `#29761 `__: BUG: Stable ``ScalarType`` ordering +* `#29764 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... 
+* `#29767 `__: DOC: add another mention of 'same_value' +* `#29768 `__: BUG: Fix pocketfft umath strides for powerpc compatibility +* `#29773 `__: DOC: Correct a typo in Troubleshooting guidelines +* `#29774 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29775 `__: DOC: Link cross references in numpy documentation +* `#29776 `__: TYP: fix and improve ``{f,i}info`` stubs in ``_core.getlimits`` +* `#29777 `__: BLD: Upgrade spin requirement to version 0.15 +* `#29780 `__: BUG: Fix assert in nditer buffer setup +* `#29794 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.0 +* `#29796 `__: TST: clarify logic in float_alias_names test +* `#29802 `__: MAINT: Remove xfail and deprecation filter from a test. +* `#29803 `__: DOC: Improve the formatting of Random Generator documentation +* `#29806 `__: MAINT: Bump actions/cache from 4.2.4 to 4.3.0 +* `#29811 `__: BUG: linalg: emit a MemoryError on a malloc failure +* `#29812 `__: BLD: refactor to avoid 'unused function' warnings' +* `#29813 `__: ENH: add warning when calling ufunc with 'where' and without... +* `#29814 `__: MAINT: Bump github/codeql-action from 3.30.3 to 3.30.4 +* `#29815 `__: DOC: Update docstring for ``count_nonzero`` +* `#29816 `__: TST: Mark thread-unsafe tests +* `#29817 `__: MAINT: Bump actions/dependency-review-action from 4.7.3 to 4.8.0 +* `#29819 `__: ENH: Add fast path in ufuncs for numerical scalars. +* `#29823 `__: ENH: cleanup warning generation and unmark xfailed tests +* `#29831 `__: MAINT: Bump github/codeql-action from 3.30.4 to 3.30.5 +* `#29832 `__: MAINT: Bump int128/hide-comment-action from 1.43.0 to 1.44.0 +* `#29833 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29836 `__: ENH, FEAT: Reorganize finfo and add new constant slot +* `#29837 `__: ENH: speedup numpy.quantile when weights are provided +* `#29841 `__: DEP: Raise ``TypeError`` on attempt to convert array with ``ndim``... 
+* `#29842 `__: TYP: Fix ``generic.__new__`` return type +* `#29843 `__: TYP: remove unused ``# type: ignore``\ s +* `#29844 `__: TYP: fix ``testing.assert_warns`` decorator order +* `#29845 `__: TYP: Add missing ``rtol`` kwarg to ``linalg.pinv`` +* `#29846 `__: TYP: Fix signatures of ``linalg.matmul`` and ``linalg.outer`` +* `#29847 `__: TYP: Fix incompatible defaults in ``polyfit``\ , ``histogram``\... +* `#29848 `__: MAINT, TYP: bump ``mypy`` to ``1.18.2`` +* `#29849 `__: CI: Use ``uv`` instead of ``pip`` in the mypy workflow +* `#29852 `__: DOC: Add a few missing commas in math operations +* `#29854 `__: MAINT: Bump ossf/scorecard-action from 2.4.2 to 2.4.3 +* `#29856 `__: CI: Try to fix loongarch64 CI +* `#29858 `__: TST: Make temporary file usage thread safe +* `#29861 `__: MAINT: Add Cython linter to spin +* `#29862 `__: TYP: Improved ``ndarray`` augmented assignment operators +* `#29866 `__: MAINT: Bump github/codeql-action from 3.30.5 to 3.30.6 +* `#29867 `__: TST: Fix misc thread unsafe data races +* `#29868 `__: BLD: Fix MSVC warnings and add CI check with allowlist +* `#29869 `__: DOC: Add warning and examples for sliding_window_view +* `#29872 `__: ENH: Set DLPack tensor ``shape`` and ``strides`` to NULL iff... +* `#29875 `__: PERF: Intern strings used to build global tuples. +* `#29880 `__: MAINT: Rewrite setitem to use the new API (mostly) +* `#29882 `__: DOC: Remove unused arrays from the structured dtype ufunc example. 
+* `#29883 `__: DOC: Add note on meson buildtype for debug builds +* `#29885 `__: MAINT: Simplify string arena growth strategy +* `#29886 `__: CI: macos-13 --> macos-15-intel +* `#29889 `__: DOC: Documentation related finfo refactors and new slot addition +* `#29891 `__: MAINT: Bump github/codeql-action from 3.30.6 to 4.30.7 +* `#29892 `__: DOC: Add Plausible analytics to the NumPy documentation +* `#29893 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types +* `#29895 `__: MAINT: Bump astral-sh/setup-uv from 6.8.0 to 7.0.0 +* `#29897 `__: BUG: Fixup float16 conversion error path and add tests +* `#29899 `__: BUG: Ensure backwards compatibility for patching finfo +* `#29900 `__: ENH: Add registration for sorting loops using new ufunc convenience... +* `#29901 `__: TYP: add missing ``__slots__`` +* `#29902 `__: TYP: wrong argument defaults in ``testing._private`` +* `#29903 `__: TYP: fix incorrect ``ma.sort`` arg default for ``stable`` +* `#29904 `__: MAINT: bump ``ruff`` from ``0.12.0`` to ``0.14.0`` +* `#29905 `__: TYP: Parameters with missing default value +* `#29906 `__: TST: do not use matplotlib 3.10.6 +* `#29908 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC +* `#29909 `__: DEP: finalize deprecation of numpy/fft.helpers and numpy.linalg.linalg... +* `#29912 `__: DOC: Add URL to valgrind tool in Advanced Debugging Page +* `#29914 `__: TYP: minor fixes related to ``errstate`` +* `#29915 `__: TYP: move ``matrix`` from ``__init__.pyi`` to ``matrixlib/defmatrix.pyi`` +* `#29917 `__: Fix memory leak in import_array() +* `#29919 `__: TST: use requirements/test_requirements across CI +* `#29924 `__: MAINT: Bump github/codeql-action from 4.30.7 to 4.30.8 +* `#29925 `__: MAINT: Avoid assumptions about how memory is allocated +* `#29927 `__: TST: Add unit test for RISC-V CPU features +* `#29930 `__: DOC: Completed and fixed PR #29578 +* `#29931 `__: ENH: In spec registration, allow looking up ufuncs in any module. 
+* `#29934 `__: CI: Use POWER10 GHA runner for NumPy test jobs +* `#29935 `__: CI: Run mypy on Python 3.14 and ignore more paths +* `#29936 `__: MAINT: Bump astral-sh/setup-uv from 7.0.0 to 7.1.0 +* `#29937 `__: MAINT: Bump pypa/cibuildwheel from 3.2.0 to 3.2.1 +* `#29938 `__: MAINT: Bump int128/hide-comment-action from 1.44.0 to 1.46.0 +* `#29939 `__: MAINT: Bump actions/dependency-review-action from 4.8.0 to 4.8.1 +* `#29942 `__: TST: Convert mixed_types_structured to method +* `#29944 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len +* `#29947 `__: BUG: support axis sequence in ``np.trim_zeros`` +* `#29948 `__: STY: rename ``@classmethod`` arg to ``cls`` +* `#29951 `__: MAINT: replace use of ``asanyarray`` with ``out=...`` to keep... +* `#29952 `__: TYP: add ``__class_getitem__`` to ``bool`` and ``datetime64`` +* `#29954 `__: TYP: fix inconsistent ``float64.__getformat__`` stub +* `#29956 `__: TYP: fix return type annotation for normalize_axis_tuple utility +* `#29957 `__: TST: Remove recwarn from tests +* `#29958 `__: TYP: Fix inconsistent ``__all__`` stubs +* `#29959 `__: TYP: stub ``numpy.ma.testutils`` +* `#29960 `__: DOC: fix formatting in ``np.percentile`` docstring +* `#29961 `__: TYP: update the ``finfo`` stubs +* `#29962 `__: MAINT: remove obsolete ``generic.tostring`` method descriptor... 
+* `#29963 `__: MAINT: Remove removed array methods +* `#29964 `__: TYP: add missing ``generic`` methods +* `#29965 `__: TYP: mark ``flexible`` as ``@final`` +* `#29966 `__: TYP: minor fixes in ``__pow__`` methods +* `#29967 `__: TYP: improved ``busdaycalendar`` annotations +* `#29968 `__: TYP: missing ``vectorize`` default argument +* `#29969 `__: TYP: fix stubtest errors in ``lib._function_base_impl`` +* `#29970 `__: BUG: Fix resize when it contains references +* `#29971 `__: TYP: update ``ScalarType`` type +* `#29972 `__: TYP: expand ``TypedDict`` kwargs in ``full`` to appease stubtest +* `#29973 `__: DEP: Remove deprecated ``interpolation`` parameter from quantile/percentile +* `#29976 `__: TYP: fix ``random.Generator.shuffle`` input type +* `#29978 `__: DEP: remove ``in1d`` +* `#29980 `__: DEP: remove ``ndindex.ndincr`` (deprecated since 1.20) +* `#29981 `__: TYP: change ``ndenumerate.__new__`` into ``__init__`` +* `#29982 `__: TYP: change ``nditer.__new__`` into ``__init__`` and tighten... +* `#29983 `__: TYP: minor fixes and improvements in ``record`` and ``recarray`` +* `#29984 `__: DEP: remove the ``fix_imports`` parameter from ``save()`` +* `#29985 `__: MAINT: Remove ``_core.MachAr`` remnants +* `#29986 `__: DEP: Remove ``ndarray.ctypes.get_\*`` methods (deprecated since... +* `#29988 `__: MAINT: remove remnants of ``linalg.linalg`` and ``fft.helper`` +* `#29989 `__: BUG: Fix np.strings.slice if start > stop +* `#29991 `__: TYP: some minor fixes for the constants in ``_core.multiarray`` +* `#29992 `__: DOC: update SIMD build options to cover riscv64 +* `#29993 `__: MAINT: avoid namespace pollution in ``_core._type_aliases`` +* `#29994 `__: DEP: remove the ``newshape`` parameter from ``reshape()`` +* `#29996 `__: MAINT: Update main after the NumPy 2.3.4 release. 
+* `#29997 `__: MAINT: remove deprecated in numpy/lib/_function_base_impl.py +* `#29998 `__: MAINT: Update write_release.py +* `#29999 `__: TYP: fix ``char.startswith`` signature +* `#30000 `__: ENH: Add ``stable`` kwarg to ``chararray.argsort`` +* `#30001 `__: TYP: fix ``ndarray.sort(stable=True)`` +* `#30002 `__: TYP: inconsistent ``strings.slice`` default argument for ``stop`` +* `#30003 `__: TYP: remove implicit re-export in ``_core._exceptions`` +* `#30004 `__: TYP: stub ``MesonTemplate.objects_substitution()`` in ``f2py._backends._meson`` +* `#30005 `__: CI, TST: Enable parallel threads testing in macOS CI job +* `#30006 `__: TYP: fix stubtest errors in ``numpy.lib.\*`` +* `#30007 `__: MAINT: Remove ``NDArrayOperatorsMixin.um`` class attribute ``umath``... +* `#30008 `__: DOC: Add concrete Meson build example for NumPy C ufunc extension +* `#30009 `__: TYP: update ``corrcoef`` signature +* `#30011 `__: TYP: ``linalg.svdvals``\ : fix inconsistent signature and add... +* `#30012 `__: MAINT: remove confusing parameter default for ``shape`` in ``reshape`` +* `#30013 `__: TYP: ``linalg.tensordot``\ : fix inconsistent signature and simplify... +* `#30014 `__: TYP: stub ``linalg.lapack_lite.LapackError`` +* `#30015 `__: MAINT: Bump github/codeql-action from 4.30.8 to 4.30.9 +* `#30018 `__: TYP: fix stubtest errors in ``numpy.ma`` +* `#30019 `__: MAINT, TST: Increase tolerance in fft test. +* `#30020 `__: DOC: Correct typos in numpy API documentation +* `#30021 `__: DEP: Remove ``delimitor`` kwarg from ``ma.mrecords.fromtextfile`` +* `#30030 `__: MAINT: Bump astral-sh/setup-uv from 7.1.0 to 7.1.1 +* `#30031 `__: TYP: fix stubtest errors in ``numpy.polynomial.\*`` +* `#30032 `__: TYP: ``testing.check_support_sve``\ : fix inconsistent parameter... 
+* `#30033 `__: TYP: fix stubtest error in ``numpy.typing`` +* `#30034 `__: TYP: Add type annotations for ASIMD, NEON, and RVV targets +* `#30035 `__: DEV: add a ``spin stubtest`` command +* `#30036 `__: TYP: restore abstract scalar type constructor parameters +* `#30039 `__: DEV: Set correct ``PYTHONPATH`` in ``spin stubtest`` +* `#30040 `__: DOC: Clarify signed vs unsigned ``intptr_t`` vs ``uintptr_t``... +* `#30043 `__: CI, TYP: stubtest +* `#30044 `__: MAINT: bump ``hypothesis`` to ``6.142.2`` +* `#30045 `__: DEV: separate stubtest allowlist for py312+ +* `#30049 `__: BLD: update scipy-openblas, use -Dpkg_config_path +* `#30050 `__: CI: Skip test runs if all changes are docs or stubs +* `#30051 `__: CI: Python 3.14 stable +* `#30052 `__: TYP, STY: ``polynomial``\ : reformat the stubs +* `#30053 `__: TYP: Type-checking the stubs +* `#30054 `__: BUG: allow division between object-dtype arrays and timedelta... +* `#30055 `__: TYP: Annotate ``ma.array``\ , ``ma.asarray``\ , and ``ma.asanyarray`` +* `#30057 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray`` +* `#30058 `__: BUG: fix int left shift UB in CPU feature detection +* `#30060 `__: TYP: ``polynomial.polyutils``\ : fix callable type signatures +* `#30061 `__: CI, TYP: Fix stubtest CI failures on py311 +* `#30064 `__: TST: Remove unnecessary test__datasource thread-unsafe markers +* `#30065 `__: TYP: ``polynomial``\ : Simplify ``chebpts{1,2}`` function stubs +* `#30067 `__: TYP: ``numpy.ma``\ : Annotate 27 functions related to masks and... +* `#30068 `__: MAINT: remove deprecated ``style`` argument and deprecations... +* `#30071 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation +* `#30073 `__: DOC: Correct a typo: an 1d -> a 1d +* `#30074 `__: DOC: Fix a couple typos in generalized-ufuncs.rst. +* `#30077 `__: BUG: prefer passing a pointer to the helper function to avoid... 
+* `#30080 `__: MAINT: Bump actions/upload-artifact from 4.6.2 to 5.0.0 +* `#30081 `__: MAINT: Bump github/codeql-action from 4.30.9 to 4.31.0 +* `#30082 `__: MAINT: Bump astral-sh/setup-uv from 7.1.1 to 7.1.2 +* `#30083 `__: DOC: Fix the first small 'process_core_dims()' example. +* `#30084 `__: TYP: ``numpy.ma``\ : Annotate the callable wrapper classes +* `#30091 `__: BUG, TYP: Fix ``ma.core._frommethod`` function signatures +* `#30093 `__: DOC: Correct grammatical usage like a/an +* `#30097 `__: CI: Update ARM job (armhf_test) to use Ubuntu 24.04 +* `#30099 `__: BUG, TYP: Fix ``ma.core._convert2ma`` function signatures +* `#30100 `__: BLD: use blobless checkout on CircleCI +* `#30101 `__: TST: Add thread-safe testing guidelines +* `#30102 `__: ENH: Make FPE blas check a runtime check for all apple arm systems +* `#30104 `__: BUG, TYP: ufunc method signatures +* `#30106 `__: MAINT: Bump github/codeql-action from 4.31.0 to 4.31.2 +* `#30108 `__: TYP: shape-type-aware ``swapaxes`` +* `#30111 `__: DOC: Add a plot to the 'unwrap' docstring. +* `#30114 `__: BUG, TYP: ``ndarray`` method runtime signatures +* `#30118 `__: CI: disable flaky ubuntu UBsan CI job +* `#30121 `__: BUG, TYP: scalar-type constructor runtime signatures +* `#30124 `__: BUG, TYP: ``flatiter`` method runtime signatures, and better... +* `#30125 `__: BUG: Fix handling by ``unique`` of signed zero in complex types. +* `#30126 `__: BUG: ``nditer`` runtime signatures +* `#30127 `__: DOC: remove outdated notes on how to build against numpy in conda-forge +* `#30128 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30132 `__: BLD: use scipy-openblas 0.3.30.7 +* `#30137 `__: BUG: ``broadcast`` runtime signatures +* `#30138 `__: BUG: array construction function runtime signatures +* `#30139 `__: MAINT,BUG: make later arguments in array2string keyword only. +* `#30140 `__: BUG, DOC, TYP: ``empty`` and ``zeros`` runtime signatures, and... 
+* `#30141 `__: MAINT: fix math markup (\times -> \\times) in numpy.linalg.multidot... +* `#30142 `__: MAINT: Migrate einsum.c.src to C++ (einsum.cpp) +* `#30143 `__: BUG, TYP: ``_core.multiarray.\*`` function runtime signatures +* `#30147 `__: BUG, TYP: add the remaining ``_core.multiarray`` function runtime... +* `#30148 `__: DOC: Fix Returns section formatting in linalg.qr and linalg.svd +* `#30149 `__: MAINT: Not show signature in git_version +* `#30153 `__: BUG: decref on error in PyArray_NewFromDescr (#30152) +* `#30154 `__: BUG: update requires to requirements in numpy.multiarray see... +* `#30155 `__: BUG, DOC: ``ndarray`` dunder method runtime signatures and missing... +* `#30160 `__: TYP: fix an invalid default value for ``array``\ 's ``ndmax``... +* `#30161 `__: ENH: Run SWIG unit tests in CI action +* `#30163 `__: ENH: Add ``order`` parameter to ``np.ma.asanyarray`` +* `#30164 `__: BUG: ``numpy.random.\*`` class runtime signatures +* `#30165 `__: MAINT: some ``numpy.polynomial.\*`` namespace pollution cleanup +* `#30166 `__: CI: add check for numpy-release version of scipy-openblas +* `#30168 `__: TYP, DEP: ``numpy.fix`` pending deprecation +* `#30169 `__: BUG: ``np.dtype`` and ``np.dtypes.\*`` runtime signatures +* `#30170 `__: ENH: Reduce compute time for ``tobytes`` in non-contiguous paths +* `#30175 `__: ENH: Updates for the ``spin bench`` command. +* `#30176 `__: BUG: Fix check of PyMem_Calloc return value. +* `#30179 `__: MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute... +* `#30183 `__: DOC: Corrected grammatical issues in code comments +* `#30190 `__: MAINT: ``ma.asanyarray``\ : use ``order=None`` as default +* `#30191 `__: MAINT: Bump int128/hide-comment-action from 1.46.0 to 1.47.0 +* `#30193 `__: BUG, DOC: ``np.generic`` missing method runtime signatures and... +* `#30196 `__: DOC: Fix some broken refs and Typos. 
+* `#30197 `__: ENH,MAINT: rewrite np.fix to use np.trunc internally +* `#30199 `__: DOC: update result_type docs to link to promotion rules +* `#30201 `__: ENH: Detect Fortran vs C order in array_assign_boolean_subscript +* `#30202 `__: MAINT: Bump actions/dependency-review-action from 4.8.1 to 4.8.2 +* `#30203 `__: MAINT: Bump astral-sh/setup-uv from 7.1.2 to 7.1.3 +* `#30206 `__: DOC: an Mercurial -> a Mercurial +* `#30208 `__: DOC: Release notes for the runtime signature changes +* `#30209 `__: MAINT: Bump pypa/cibuildwheel from 3.2.1 to 3.3.0 +* `#30211 `__: ENH: ``ufunc.__signature__`` +* `#30213 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30214 `__: BUG: Fix build on s390x with clang +* `#30219 `__: MAINT: Bump github/codeql-action from 4.31.2 to 4.31.3 +* `#30221 `__: TYP: Annotate remaining ``ma.MaskedArray`` methods +* `#30222 `__: CI: remove (mainly windows) jobs from Azure pipelines +* `#30223 `__: STY: fix ``ma.MaskedArray.tolist`` docstring indentation +* `#30224 `__: TYP: ``ravel``\ : less awkward return types +* `#30226 `__: TYP: stub ``ma.core.get_masked_subclass`` +* `#30227 `__: CI: fixes https://github.com/numpy/numpy/security/code-scanning/364 +* `#30228 `__: BUG: fix data race in ``wrapping_auxdata_freelist`` by making... +* `#30229 `__: ENH, TYP: transparent ``ma.extras._fromnxfunction`` runtime signatures +* `#30231 `__: TYP: Shape-typing in ``lib._twodim_base_impl`` +* `#30232 `__: CI: removes azure pipelines +* `#30233 `__: TYP: ``_core.numeric``\ : shape-typing and fixed overlapping... 
+* `#30234 `__: BUG: fix data race in ``PyArray_DescrHash`` +* `#30235 `__: MAINT: undo change to ``fromstring`` text signature for 2.4.0 +* `#30239 `__: DOC: Correct an equation error in ``numpy.random.Generator.pareto`` +* `#30242 `__: BUG: fix einsum ``optimize=True`` parsing error +* `#30243 `__: BUG: Add missing ``PyErr_Occurred()`` check to fast-path +* `#30246 `__: TYP: ``lib._function_base_impl``\ : many typing improvements +* `#30247 `__: DOC: Update wording in numpy.coremath +* `#30248 `__: DOC: remove mention of 'skip azp' since we no longer use azure +* `#30252 `__: MAINT: Bump actions/checkout from 5.0.0 to 5.0.1 +* `#30253 `__: MAINT: Bump github/codeql-action from 4.31.3 to 4.31.4 +* `#30255 `__: BUG: always ignore FPE when Accelerate is the BLAS backend +* `#30256 `__: CI: update ``paths-ignore`` for mypy and wheels workflows +* `#30259 `__: TST: mark tests which call ``gc.collect()`` as thread unsafe +* `#30261 `__: TYP: fix shape-type of structured array fields +* `#30263 `__: TST: scalar fast path multithreaded test +* `#30266 `__: ENH: New-style sorting for StringDType +* `#30270 `__: ENH: Use descriptor rather than custom ``tp_getattro`` +* `#30271 `__: TST: Join threads in ``test_printoptions_thread_safety`` +* `#30273 `__: MAINT: Bump actions/checkout from 5.0.1 to 6.0.0 +* `#30276 `__: MAINT: Bump astral-sh/setup-uv from 7.1.3 to 7.1.4 +* `#30277 `__: BUG: Fix misleading ValueError in convolve on empty inputs due... 
+* `#30278 `__: BUG: fix np.resize refchk on python 3.14 +* `#30279 `__: MAINT: refactor unary temporary elision check +* `#30282 `__: DEP, TYP: ``ndarray.shape`` setter pending deprecation +* `#30284 `__: DEP: deprecate ``numpy.lib.user_array.container`` +* `#30286 `__: MAINT: add ``matmul`` to ``_core.umath.__all__`` +* `#30288 `__: MAINT: Bump github/codeql-action from 4.31.4 to 4.31.5 +* `#30289 `__: TYP: ``_core.overrides.set_module`` implicit re-export +* `#30290 `__: TYP: move the ``normalize_axis_\*`` function definitions from... +* `#30291 `__: TYP: ``lib._function_base_impl._quantile_ureduce_func`` inline... +* `#30293 `__: TYP: move ``vectorize`` stubs to ``lib._function_base_impl`` +* `#30294 `__: TYP: ``_core.\*``\ : stubs for some private functions and constants +* `#30295 `__: MAINT: remove ``lib._shape_base_impl._replace_zero_by_x_arrays`` +* `#30296 `__: TYP: ``lib.\*``\ : stubs for some private functions used by ``_function_base_imp``... +* `#30297 `__: MAINT: ``broadcast_shapes``\ : update presumed ``NPY_MAXARGS``... +* `#30300 `__: BUG: Fix RecursionError and raise ValueError for unmatched parentheses +* `#30303 `__: MAINT: Bump actions/setup-python from 6.0.0 to 6.1.0 +* `#30310 `__: MAINT: avoid unused variable warnings in dtype tests +* `#30312 `__: MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp +* `#30313 `__: DOC: record a data -> record a data point +* `#30314 `__: BUG: Fix descriptor changes related build/parse value issues... +* `#30318 `__: DOC: Fix duplicate ``import pytest`` in testing documentation... 
+* `#30321 `__: TYP: ``__numpy_dtype__`` +* `#30324 `__: TYP: ``ndenumerate`` generic type parameter default +* `#30325 `__: DOC, TYP: Expand the 2.3 ``numpy.typing`` deprecation docs +* `#30326 `__: TYP: ``ma.mrecords.MaskedRecords`` generic type parameter defaults +* `#30327 `__: TYP: ``_core._umath_tests`` module stubs +* `#30347 `__: REL: Prepare for the NumPy 2.4.0rc1 release +* `#30377 `__: MAINT: don't assert RecursionError in monster dtype test (#30375) +* `#30378 `__: CI: bump FreeBSD from 14.2 to 14.3 +* `#30398 `__: MAINT: Use RAII objects in unique.cpp to ensure safe resource... +* `#30399 `__: BUG: raise BufferError when creating dlpack with wrong device... +* `#30400 `__: BUG: fix free-threaded races in RandomState +* `#30401 `__: BUG: fix reduction issue in weighted quantile (#30070) +* `#30403 `__: SIMD, BLD: Fix Highway target attribute build failure on ppc64... +* `#30408 `__: BUG: Add missing return status check of NpyIter_EnableExternalLoop()... +* `#30419 `__: DOC: Improve cross-links in thread safety documentation (#30373) +* `#30420 `__: BUG: fix double evaluation in PyArrayScalar_RETURN_BOOL_FROM_LONG... +* `#30432 `__: BUG: fix remaining data races in mtrand.pyx (#30426) +* `#30459 `__: TYP: restore ``generic.__hash__`` (#30456) diff --git a/doc/changelog/2.4.1-changelog.rst b/doc/changelog/2.4.1-changelog.rst new file mode 100644 index 000000000000..3cf0d8ad0ec5 --- /dev/null +++ b/doc/changelog/2.4.1-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Bill Tompkins + +* Charles Harris +* Joren Hammudoglu +* Marten van Kerkwijk +* Nathan Goldbaum +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. 
+ +* `#30490 `__: MAINT: Prepare 2.4.x for further development +* `#30503 `__: DOC: ``numpy.select``\ : fix ``default`` parameter docstring... +* `#30504 `__: REV: Revert part of #30164 (#30500) +* `#30506 `__: TYP: ``numpy.select``\ : allow passing array-like ``default``... +* `#30507 `__: MNT: use if constexpr for compile-time branch selection +* `#30513 `__: BUG: Fix leak in flat assignment iterator +* `#30516 `__: BUG: fix heap overflow in fixed-width string multiply (#30511) +* `#30523 `__: BUG: Ensure summed weights returned by np.average always are... +* `#30527 `__: TYP: Fix return type of histogram2d +* `#30594 `__: MAINT: avoid passing ints to random functions that take double... +* `#30595 `__: BLD: Avoiding conflict with pygit2 for static build +* `#30596 `__: MAINT: Fix msvccompiler missing error on FreeBSD +* `#30608 `__: BLD: update vendored Meson to 1.9.2 +* `#30620 `__: ENH: use more fine-grained critical sections in array coercion... +* `#30623 `__: BUG: Undo result type change of quantile/percentile but keep... diff --git a/doc/changelog/2.4.2-changelog.rst b/doc/changelog/2.4.2-changelog.rst new file mode 100644 index 000000000000..06d50fa5e8f0 --- /dev/null +++ b/doc/changelog/2.4.2-changelog.rst @@ -0,0 +1,35 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Tang + +* Joren Hammudoglu +* Kumar Aditya +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Sebastian Berg +* Vikram Kumar + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#30629 `__: MAINT: Prepare 2.4.x for further development +* `#30636 `__: TYP: ``arange``\ : accept datetime strings +* `#30657 `__: MAINT: avoid possible race condition by not touching ``os.environ``... 
+* `#30700 `__: BUG: validate contraction axes in tensordot (#30521) +* `#30701 `__: DOC: __array_namespace__info__: set_module not __module__ (#30679) +* `#30702 `__: BUG: fix free-threaded PyObject layout in replace_scalar_type_names... +* `#30703 `__: TST: fix limited API example in tests for latest Cython +* `#30709 `__: BUG: Fix some bugs found via valgrind (#30680) +* `#30712 `__: MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact +* `#30713 `__: BUG: Fixup the quantile promotion fixup +* `#30736 `__: BUG: fix thread safety of ``array_getbuffer`` (#30667) +* `#30737 `__: backport scipy-openblas version change + diff --git a/doc/changelog/2.4.3-changelog.rst b/doc/changelog/2.4.3-changelog.rst new file mode 100644 index 000000000000..c927575ce807 --- /dev/null +++ b/doc/changelog/2.4.3-changelog.rst @@ -0,0 +1,39 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Antareep Sarkar + +* Charles Harris +* Joren Hammudoglu +* Matthieu Darbois +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Sebastian Berg +* Warren Weckesser +* stratakis + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#30759 `__: MAINT: Prepare 2.4.x for further development +* `#30827 `__: BUG: Fix some leaks found via LeakSanitizer (#30756) +* `#30841 `__: MAINT: Synchronize 2.4.x submodules with main +* `#30849 `__: TYP: ``matlib``\ : missing extended precision imports +* `#30850 `__: BUG: Fix weak hash function in np.isin(). (#30840) +* `#30921 `__: BUG: fix infinite recursion in np.ma.flatten_structured_array... +* `#30922 `__: BUG: Fix buffer overrun in CPU baseline validation (#30877) +* `#30923 `__: BUG: Fix busdaycalendar's handling of a bool array weekmask.... 
+* `#30924 `__: BUG: Fix reference leaks and NULL pointer dereferences (#30908) +* `#30925 `__: MAINT: fix two minor issues noticed when touching the C API setup +* `#30955 `__: ENH: Test .kind not .char in np.testing.assert_equal (#30879) +* `#30957 `__: BUG: fix type issues in uses if PyDataType macros +* `#30958 `__: MAINT: Don't use vulture 2.15, it has false positives +* `#30973 `__: MAINT: update openblas (#30961) + diff --git a/doc/changelog/2.4.4-changelog.rst b/doc/changelog/2.4.4-changelog.rst new file mode 100644 index 000000000000..110c9e445f8a --- /dev/null +++ b/doc/changelog/2.4.4-changelog.rst @@ -0,0 +1,29 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Haag + +* Denis Prokopenko + +* Harshith J + +* Koki Watanabe +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum + +Pull requests merged +==================== + +A total of 7 pull requests were merged for this release. + +* `#30978 `__: MAINT: Prepare 2.4.x for further development +* `#31049 `__: BUG: Add test to reproduce problem described in #30816 (#30818) +* `#31052 `__: BUG: fix FNV-1a 64-bit selection by using NPY_SIZEOF_UINTP (#31035) +* `#31053 `__: BUG: avoid warning on ufunc with where=True and no output +* `#31058 `__: DOC: document caveats of ndarray.resize on 3.14 and newer +* `#31079 `__: TST: fix POWER VSX feature mapping (#30801) +* `#31084 `__: MAINT: numpy.i: Replace deprecated ``sprintf`` with ``snprintf``... + diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 33faaf17ff64..056002135dbd 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -39,10 +39,10 @@ templates_path = ['../source/_templates/'] # The suffix(es) of source filenames. 
-# You can specify multiple suffix as a list of string: +# You can specify multiple suffixes as a dict mapping suffixes to parsers: # -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} +source_suffix = {'.rst': 'restructuredtext'} # The master toctree document. master_doc = 'content' diff --git a/doc/neps/index.rst b/doc/neps/index.rst index 1891641cbafd..5202c9dd6e91 100644 --- a/doc/neps/index.rst +++ b/doc/neps/index.rst @@ -6,7 +6,7 @@ This page provides an overview of development priorities for NumPy. Specifically, it contains a roadmap with a higher-level overview, as well as NumPy Enhancement Proposals (NEPs)—suggested changes to the library—in various stages of discussion or completion. -See :doc:`nep-0000` for more informations about NEPs. +See :doc:`nep-0000` for more information about NEPs. Roadmap ------- diff --git a/doc/neps/nep-0025-missing-data-3.rst b/doc/neps/nep-0025-missing-data-3.rst index 1756ce491188..1cea24fc8ee8 100644 --- a/doc/neps/nep-0025-missing-data-3.rst +++ b/doc/neps/nep-0025-missing-data-3.rst @@ -432,7 +432,7 @@ follows: * strings: the first byte (or, in the case of unicode strings, first 4 bytes) is used as a flag to indicate NA, and the rest of the data gives the actual string. (no R compatibility possible) -* objects: Two options (FIXME): either we don't include an NA-ful version, or +* objects: Two options (FIXME): either we don't include an NA-full version, or we use np.NA as the NA bit pattern. * boolean: we do whatever R does (FIXME: look this up -- 0 == FALSE, 1 == TRUE, 2 == NA?) 
diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrays.rst similarity index 100% rename from doc/neps/nep-0027-zero-rank-arrarys.rst rename to doc/neps/nep-0027-zero-rank-arrays.rst diff --git a/doc/neps/nep-0040-legacy-datatype-impl.rst b/doc/neps/nep-0040-legacy-datatype-impl.rst index 6fa652eb07ee..3aacc5e9d75d 100644 --- a/doc/neps/nep-0040-legacy-datatype-impl.rst +++ b/doc/neps/nep-0040-legacy-datatype-impl.rst @@ -523,7 +523,7 @@ in the array and: If ``dtype=...`` is given, this dtype is used unmodified, unless it is an unspecific *parametric dtype instance* which means "S0", "V0", "U0", -"datetime64", and "timdelta64". +"datetime64", and "timedelta64". These are thus flexible datatypes without length 0 – considered to be unsized – and datetimes or timedelta without a unit attached ("generic unit"). diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 4bac8d7a3282..b10e21b1e9d8 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -241,7 +241,7 @@ to define string equality, will be added to a ufunc. nin = 1 nout = 1 # DTypes are stored on the BoundArrayMethod and not on the internal - # ArrayMethod, to reference cyles. + # ArrayMethod, to reference cycles. DTypes = (String, String, Bool) def resolve_descriptors(self: ArrayMethod, DTypes, given_descrs): @@ -316,7 +316,7 @@ the following: .. 
code-block:: python def promote_timedelta_integer(ufunc, dtypes): - new_dtypes = (Timdelta64, Int64, dtypes[-1]) + new_dtypes = (Timedelta64, Int64, dtypes[-1]) # Resolve again, using Int64: return ufunc.resolve_impl(new_dtypes) @@ -609,7 +609,7 @@ definitions (see also :ref:`NEP 42 ` ``CastingImpl``): int nin, nout; PyArray_DTypeMeta **dtypes; - /* Operand descriptors, filled in by resolve_desciptors */ + /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr **descriptors; void *reserved; // For Potential in threading (Interpreter state) diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index aa04dd2c740e..974f6691d363 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -509,9 +509,9 @@ will be ignored. This means, that operations will never silently use the The user will have to write one of:: np.array([3]) + np.array(2**100) - np.array([3]) + np.array(2**100, dtype=object) + np.array([3]) + np.array(2**100, dtype=np.object_) -As such implicit conversion to ``object`` should be rare and the work-around +As such implicit conversion to ``object_`` should be rare and the work-around is clear, we expect that the backwards compatibility concerns are fairly small. diff --git a/doc/neps/nep-0053-c-abi-evolution.rst b/doc/neps/nep-0053-c-abi-evolution.rst index 16744dc0fde3..c0193af6732c 100644 --- a/doc/neps/nep-0053-c-abi-evolution.rst +++ b/doc/neps/nep-0053-c-abi-evolution.rst @@ -47,7 +47,7 @@ The implementation of this NEP consists would consist of two steps: Motivation and scope ==================== -The NumPy API conists of more than 300 functions and numerous macros. +The NumPy API consists of more than 300 functions and numerous macros. Many of these are outdated: some were only ever used within NumPy, exist only for compatibility with NumPy's predecessors, or have no or only a single known downstream user (i.e. SciPy). 
diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 7e29e1425e8c..28dc9572ed6a 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -51,9 +51,7 @@ needs and then changes in the Python ecosystem. Support for strings was added to NumPy to support users of the NumArray ``chararray`` type. Remnants of this are still visible in the NumPy API: string-related functionality lives in ``np.char``, to support the -``np.char.chararray`` class. This class is not formally deprecated, but has a -had comment in the module docstring suggesting to use string dtypes instead -since NumPy 1.4. +``np.char.chararray`` class, which was deprecated in NumPy 2.5. NumPy's ``bytes_`` DType was originally used to represent the Python 2 ``str`` type before Python 3 support was added to NumPy. The bytes DType makes the most @@ -224,7 +222,7 @@ to fixed-width unicode arrays:: In [3]: data = [str(i) * 10 for i in range(100_000)] - In [4]: %timeit arr_object = np.array(data, dtype=object) + In [4]: %timeit arr_object = np.array(data, dtype=np.object_) 3.15 ms Âą 74.4 Âĩs per loop (mean Âą std. dev. of 7 runs, 100 loops each) In [5]: %timeit arr_stringdtype = np.array(data, dtype=StringDType()) @@ -242,7 +240,7 @@ for strings, the string loading performance of ``StringDType`` should improve. String operations have similar performance:: - In [7]: %timeit np.array([s.capitalize() for s in data], dtype=object) + In [7]: %timeit np.array([s.capitalize() for s in data], dtype=np.object_) 31.6 ms Âą 728 Âĩs per loop (mean Âą std. dev. of 7 runs, 10 loops each) In [8]: %timeit np.char.capitalize(arr_stringdtype) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst new file mode 100644 index 000000000000..570287d4d0d4 --- /dev/null +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -0,0 +1,328 @@ +.. 
_NEP57: + +=============================== +NEP 57 — NumPy platform support +=============================== + +:Author: Ralf Gommers +:Status: Draft +:Type: Process +:Created: 2026-01-30 +:Resolution: - + + +Abstract +-------- + +This NEP documents how a platform - i.e., a specific operating system, CPU +architecture and CPython interpreter - becomes supported in NumPy, which +platforms are currently supported, and which were supported in the (recent) past. + + +Motivation and scope +-------------------- + +This policy is drafted now (early 2026) because there is a lot of interest in +extending the number of platforms NumPy supports through wheels in particular. +It is a policy specific to NumPy - even though other projects may possibly want +to refer to it - for several reasons: + +* It involves committing to a nontrivial amount of maintainer effort, +* Personal commitment from a maintainer may make the difference between a + yes and a no of supporting a platform (e.g., NumPy supported PyPy for a + long time because of the efforts of one maintainer) +* Support for a platform being possible at all may depend on features of the + code base (e.g., NumPy supports 32-bit Python on Windows while SciPy does + not because there's no suitable compiler toolchain for it). +* The number of wheels depends on whether the Stable ABI can be used (NumPy + is more performance-sensitive for small arrays, so can't use it) + + +The scope of this NEP includes: + +- The definition of tiers of support for platforms by NumPy +- Policies and decision making for moving a platform to a different tier + +Out of scope for this NEP are: + +- Binary distributions of NumPy outside of PyPI +- Partial testing in CI (e.g., testing only SIMD-specific code under QEMU) +- More detailed breakdowns of wheels and support matrices, like compiler flavor + and minimum version, or the BLAS library that is used in the build. 
+ + +Support tiers +------------- + +*This section is inspired by PEP 11 (CPython platform support), although +definitions are not matching, because NumPy is not nearly as large a project as +CPython.* + +Platform support is broken down into tiers. Each tier comes with different +requirements which lead to different promises being made about support. + +To be promoted to a tier, +`Steering council +`__ +support is required and is expected to be driven by team consensus. Demotion to +a lower tier occurs when the requirements of the current tier are no longer met +for a platform for an extended period of time based on the judgment of the +Steering Council. For platforms which no longer meet the requirements of any +tier by the middle of a new feature release cycle, an announcement will be made +to warn the community of the pending removal of support for the platform. If +the platform is not brought into line for at least one of the tiers by the +first release candidate, it will be listed as unsupported in this NEP. + + +General principles +~~~~~~~~~~~~~~~~~~ + +1. Maintainer effort is expensive, and we collectively have limited bandwidth - + hence platform support is strongly influenced by the willingness of one or + more maintainers to put in that effort. + + - Maintainers are trusted by the whole team. We generally do not question + *why* a maintainer is motivated to put in the effort. If they are being + paid for their effort or doing it as part of their job, that is fine - + however they should disclose this to the Steering Council, and indicate + whether long-term support is conditional on their employment or contractor + status for the support tiers that include releasing wheels to PyPI. + + *Rationale: releasing wheels to PyPI is a long-term commitment by the + project as a whole, see the backwards compatibility section below.* + +2. CI support for the platform is required, preferably with native runners. 
+ Free is best, however decisions on paid CI are up to the Steering Council. + Emulation for running the test suite (e.g., under QEMU) or self-hosted + buildbots are slower and less reliable, hence not preferred. + +3. There should be broad enough demand for support for the platform for the + tiers that include releasing wheels to PyPI. + + - A previously used rule of thumb: >=0.5% of the user base should be on this + platform. There may be reasons to deviate from this rule of thumb. + + *Note: finding clean data sources isn't always easy. If wheels are already + being shipped, for NumPy or for a comparable project, then download data + from PyPI may be obtained through BigQuery. For new platforms, sources + like the* + `Steam Hardware & Software Survey `__ + *may have to be used.* + +4. Adding a regular CI job (i.e., not aimed at uploading wheels to PyPI) for a + platform to the NumPy CI matrix is much cheaper, and easily reverted in case + of problems. The bar for adding such jobs is low, and assessed on a + case-by-case basis. + +5. For all platforms in any supported tier: the relevant prerequisites in our + dependencies must be met. E.g., build tools have support, and for wheels + there is support in CPython, PyPI, cibuildwheel, manylinux, and + ``scipy-openblas64`` or another easily-integrated BLAS library. + +6. Decision making: + + - Moving a platform to a lower support tier must be discussed on the mailing list. + The circumstances for each platform are unique so the community will + evaluate each proposal to demote a platform on a case-by-case basis. + - Moving a platform to a higher support tier, if that higher tier includes + releasing wheels on PyPI for that platform, must be discussed on the + mailing list. 
+ - Adding an entry to a support tier in this NEP for (a) an unsupported + platform or (b) a tier which does not include uploading wheels to PyPI can + be done on GitHub through a regular pull request (assuming it's clear from + the discussion that the relevant maintainers agree it doesn't need to hit + the mailing list). + + +Releasing wheels to PyPI +'''''''''''''''''''''''' + +The wheels that the NumPy team releases on PyPI for the ``numpy`` package get +hundreds of millions of downloads a month. We therefore highly value both +reliability and supply chain security of those release artifacts. Compromising +on those aspects is unlikely to be acceptable for the NumPy team. + +The details of how wheels are produced, tested and distributed can be found in +the `numpy/numpy-release `__ +repository. Some key requirements of the current setup, which aren't likely to +change soon, are: + +1. Must be buildable on publicly-visible CI infrastructure (i.e., GitHub). +2. Must be tested well enough (meaning native runners are preferred; QEMU is quite slow). +3. Must be publishable to PyPI automatically, through PyPI's trusted publishing + mechanism. + + +Tier 1 +~~~~~~ + +- Must have regular CI support on GitHub or (exceptionally) through another + well-integrated CI platform that the release team and Steering Council deem + acceptable. +- The NumPy team releases wheels on PyPI for this platform. +- CI failures (either regular CI or wheel build CI) block releases. +- All maintainers are responsible to keep the ``main`` branch and wheel builds + working. 
+ +Tier 1 platforms: + ++---------------------------+--------------------------------------------------------------------------+ +| Platform | Notes | ++===========================+==========================================================================+ +| Windows x86-64 | | ++---------------------------+--------------------------------------------------------------------------+ +| Windows arm64 | | ++---------------------------+--------------------------------------------------------------------------+ +| Windows x86 | 32-bit Python: note this is shipped without BLAS, it's legacy | ++---------------------------+--------------------------------------------------------------------------+ +| Linux x86-64 (manylinux) | | ++---------------------------+--------------------------------------------------------------------------+ +| Linux aarch64 (manylinux) | | ++---------------------------+--------------------------------------------------------------------------+ +| macOS arm64 | | ++---------------------------+--------------------------------------------------------------------------+ +| macOS x86-64 | Expected to move to unsupported by 2027/28 once the platform is dropped | +| | by GitHub | ++---------------------------+--------------------------------------------------------------------------+ + + +Tier 2 +~~~~~~ + +- Must have regular CI support, either as defined for Tier 1 or through a + reliable self-hosted service. +- The NumPy team releases wheels on PyPI for this platform. +- CI failures block releases. +- Must have at least one maintainer who commits to take primary and long-term + responsibility for keeping the ``main`` branch and wheel builds working. 
+ +Tier 2 platforms: + ++---------------------------+-------+------------------------------------------+ +| Platform | Notes | Contacts | ++===========================+=======+==========================================+ +| Linux x86-64 (musllinux) | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ +| Linux aarch64 (musllinux) | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ +| Free-threaded CPython | | Nathan Goldbaum, Kumar Aditya, | +| | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ + + +Tier 3 +~~~~~~ + +- Is supported as part of NumPy's regular CI setup for the ``main`` branch. CI + support as defined for Tier 2. +- No wheels are released on PyPI for this platform. +- CI failures block releases (skips may be applied when the failure is clearly + platform-specific and does not indicate a regression in core functionality). +- Must have at least one maintainer or a regular contributor trusted by the + NumPy maintainers who commits to take responsibility for CI on the ``main`` + branch working. 
+ +Tier 3 platforms: + ++--------------------+----------------------------------------+----------------------------------+ +| Platform | Notes | Contacts | ++====================+========================================+==================================+ +| FreeBSD | Runs on Cirrus CI | Ralf Gommers | ++--------------------+----------------------------------------+----------------------------------+ +| Linux ppc64le | Runs on IBM-provided self-hosted | Sandeep Gupta | +| | runners, see gh-22318_ | | ++--------------------+----------------------------------------+----------------------------------+ +| Emscripten/Pyodide | We currently provide nightly wheels, | Agriya Khetarpal, Gyeongjae Choi | +| | used for interactive docs | | ++--------------------+----------------------------------------+----------------------------------+ + + +Unsupported platforms +~~~~~~~~~~~~~~~~~~~~~ + +All platforms not listed in the above tiers are unsupported by the NumPy team. +We do not develop and test on such platforms, and so cannot provide any +promises that NumPy will work on them. + +However, the code base does include unsupported code – that is, code specific +to unsupported platforms. Contributions in this area are welcome as long as +they: + +- pose a minimal maintenance burden to the core team, and +- benefit substantially more people than the contributor. 
+ +Unsupported platforms (previously in a supported tier, may be an incomplete +list): + ++------------------------------------+--------------------------------------------------+ +| Platform | Notes | ++====================================+==================================================+ +| PyPy | Was Tier 2 for releases <=2.4.x, see gh-30416_ | ++------------------------------------+--------------------------------------------------+ +| macOS ppc64, universal, universal2 | | ++------------------------------------+--------------------------------------------------+ +| Linux i686 | Dropped in 1.22.0, low demand | ++------------------------------------+--------------------------------------------------+ +| Linux on IBM Z (s390x) | CI jobs used to run on TravisCI | ++------------------------------------+--------------------------------------------------+ + +Unsupported platforms (known interest in moving to a higher tier): + ++----------+------------------+ +| Platform | Notes | ++==========+==================+ +| iOS | See gh-28759_ | ++----------+------------------+ +| Android | See gh-30412_ | ++----------+------------------+ +| RISC-V | See gh-30216_ | ++----------+------------------+ +| WASI | See gh-25859_ | ++----------+------------------+ + + +Backward compatibility +---------------------- + +Moving a platform to a lower tier of support is generally backwards compatible. +The exception is stopping to release wheels on PyPI for a platform. That causes +significant disruption for existing users on that platform. Their install commands +(e.g., ``pip install numpy``) may stop working because if a new release no longer +has wheels for the platform, by default ``pip`` will try to build from source rather +than using a wheel from an older version of ``numpy``. Therefore, we should be very +reluctant to drop wheels for any platform. + + +Discussion +---------- + +- `ENH: Provide Windows ARM64 wheels (numpy#22530) `__ +- `Releasing PowerPC (ppc64le) wheels? 
(numpy#22318) `__ +- `MAINT: drop support for PyPy (numpy#30416) `__ +- `ENH: Build and distribute manylinux wheels for riscv64 `__ +- `BLD: Add support for building iOS wheels (numpy#28759) `__ +- `BLD: Add Android support `__ +- `ENH: WASI Build `__ +- `PEP 11 - CPython platform support `__ +- `Debian's supported architectures `__ +- `Discussion about supported platforms for wheels (scientific-python issue/discussion (Nov 2025) `__ +- `What platforms should wheels be provided for by default? (Packaging Discourse thread, 2026) `__ +- `Expectations that projects provide ever more wheels (pypackaging-native) `__ + + +References and footnotes +------------------------ + +.. _gh-22318: https://github.com/numpy/numpy/issues/22318 +.. _gh-22530: https://github.com/numpy/numpy/issues/22530 +.. _gh-25859: https://github.com/numpy/numpy/issues/25859 +.. _gh-28759: https://github.com/numpy/numpy/pull/28759 +.. _gh-30216: https://github.com/numpy/numpy/issues/30216 +.. _gh-30412: https://github.com/numpy/numpy/pull/30412 +.. _gh-30416: https://github.com/numpy/numpy/issues/30416 + + +Copyright +--------- + +This document has been placed in the public domain. diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 01cd21158be0..f4a9907dcc7e 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -110,8 +110,7 @@ User experience Type annotations ```````````````` -Type annotations for most NumPy functionality is complete (although some -submodules like ``numpy.ma`` are missing return types), so users can use tools +Type annotations for NumPy functionality are complete, so users can use tools like `mypy`_ to type check their code and IDEs can improve their support for NumPy. Improving those type annotations, for example to support annotating array shapes (see `gh-16544 `__), @@ -214,7 +213,7 @@ Maintenance - ``numpy.ma`` is still in poor shape and under-maintained. 
It needs to be improved, ideas include: - - Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project? + - Rewrite masked arrays to not be an ndarray subclass -- maybe in a separate project? - MaskedArray as a duck-array type, and/or - dtypes that support missing values diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 93887c4b12ff..ffa3d8655ad8 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,10 +36,10 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (build support for C++, Fortran, BLAS/LAPACK, and other - relevant libraries for scientific computing) + - numpy.distutils (removed in NumPy 2.5.0, was providing build support for C++, Fortran, + BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - - testing utilities + - testing utilities (mostly deprecated, pytest does a good job) - **Speed**: we take performance concerns seriously and aim to execute operations on large arrays with similar performance as native C diff --git a/doc/release/upcoming_changes/28590.improvement.rst b/doc/release/upcoming_changes/28590.improvement.rst deleted file mode 100644 index 35f5cb3c2ad2..000000000000 --- a/doc/release/upcoming_changes/28590.improvement.rst +++ /dev/null @@ -1,33 +0,0 @@ -Fix ``flatiter`` indexing edge cases ------------------------------------- - -The ``flatiter`` object now shares the same index preparation logic as -``ndarray``, ensuring consistent behavior and fixing several issues where -invalid indices were previously accepted or misinterpreted. - -Key fixes and improvements: - -* Stricter index validation - - - Boolean non-array indices like ``arr.flat[[True, True]]`` were - incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. - They now raise an index error. 
Note that indices that match the - iterator's shape are expected to not raise in the future and be - handled as regular boolean indices. Use ``np.asarray()`` if - you want to match that behavior. - - Float non-array indices were also cast to integer and incorrectly - treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now - deprecated and will be removed in a future version. - - 0-dimensional boolean indices like ``arr.flat[True]`` are also - deprecated and will be removed in a future version. - -* Consistent error types: - - Certain invalid `flatiter` indices that previously raised `ValueError` - now correctly raise `IndexError`, aligning with `ndarray` behavior. - -* Improved error messages: - - The error message for unsupported index operations now provides more - specific details, including explicitly listing the valid index types, - instead of the generic ``IndexError: unsupported index operation``. diff --git a/doc/release/upcoming_changes/28595.improvement.rst b/doc/release/upcoming_changes/28595.improvement.rst deleted file mode 100644 index aea833f5179c..000000000000 --- a/doc/release/upcoming_changes/28595.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Improved error handling in `np.quantile` ----------------------------------------- -`np.quantile` now raises errors if: - -* All weights are zero -* At least one weight is `np.nan` -* At least one weight is `np.inf` \ No newline at end of file diff --git a/doc/release/upcoming_changes/28767.change.rst b/doc/release/upcoming_changes/28767.change.rst deleted file mode 100644 index ec173c3672b0..000000000000 --- a/doc/release/upcoming_changes/28767.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -``unique_values`` for string dtypes may return unsorted data ------------------------------------------------------------- -np.unique now supports hash‐based duplicate removal for string dtypes. 
-This enhancement extends the hash-table algorithm to byte strings ('S'), -Unicode strings ('U'), and the experimental string dtype ('T', StringDType). -As a result, calling np.unique() on an array of strings will use -the faster hash-based method to obtain unique values. -Note that this hash-based method does not guarantee that the returned unique values will be sorted. -This also works for StringDType arrays containing None (missing values) -when using equal_nan=True (treating missing values as equal). diff --git a/doc/release/upcoming_changes/28767.performance.rst b/doc/release/upcoming_changes/28767.performance.rst deleted file mode 100644 index ef8ac1c3a45d..000000000000 --- a/doc/release/upcoming_changes/28767.performance.rst +++ /dev/null @@ -1,10 +0,0 @@ -Performance improvements to ``np.unique`` for string dtypes ------------------------------------------------------------ -The hash-based algorithm for unique extraction provides -an order-of-magnitude speedup on large string arrays. -In an internal benchmark with about 1 billion string elements, -the hash-based np.unique completed in roughly 33.5 seconds, -compared to 498 seconds with the sort-based method -– about 15× faster for unsorted unique operations on strings. -This improvement greatly reduces the time to find unique values -in very large string datasets. diff --git a/doc/release/upcoming_changes/28896.change.rst b/doc/release/upcoming_changes/28896.change.rst deleted file mode 100644 index 47538b7b22b2..000000000000 --- a/doc/release/upcoming_changes/28896.change.rst +++ /dev/null @@ -1,56 +0,0 @@ -Modulate dispatched x86 CPU features ------------------------------------- - -**IMPORTANT**: The default setting for `cpu-baseline`` on x86 has been raised to `x86-64-v2` microarchitecture. -This can be changed to none during build time to support older CPUs, -though SIMD optimizations for pre-2009 processors are no longer maintained. 
- -NumPy has reorganized x86 CPU features into microarchitecture-based groups instead of individual features, -aligning with Linux distribution standards and Google Highway requirements. - -Key changes: -* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, ``X86_V3``, and ``X86_V4`` -* Raised the baseline to ``X86_V2`` -* Improved ``-`` operator behavior to properly exclude successor features that imply the excluded feature -* Added meson redirections for removed feature names to maintain backward compatibility -* Removed compiler compatibility workarounds for partial feature support (e.g., AVX512 without mask operations) -* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi support - -New Feature Group Hierarchy: - -.. list-table:: - :header-rows: 1 - :align: left - - * - Name - - Implies - - Includes - * - ``X86_V2`` - - - - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` - * - ``X86_V3`` - - ``X86_V2`` - - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` - * - ``X86_V4`` - - ``X86_V3`` - - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - * - ``AVX512_ICL`` - - ``X86_V4`` - - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` - * - ``AVX512_SPR`` - - ``AVX512_ICL`` - - ``AVX512FP16`` - - -These groups correspond to CPU generations: - -- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) -- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) -- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) -- ``AVX512_ICL``: Intel Ice Lake and similar CPUs -- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs - -.. note:: - On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. - -Documentation has been updated with details on using these new feature groups with the current meson build system. 
diff --git a/doc/release/upcoming_changes/28925.deprecation.rst b/doc/release/upcoming_changes/28925.deprecation.rst deleted file mode 100644 index a421839394fa..000000000000 --- a/doc/release/upcoming_changes/28925.deprecation.rst +++ /dev/null @@ -1,9 +0,0 @@ -Setting the ``strides`` attribute is deprecated ------------------------------------------------ -Setting the strides attribute is now deprecated since mutating -an array is unsafe if an array is shared, especially by multiple -threads. As an alternative, you can create a new view (no copy) via: -* `np.lib.stride_tricks.strided_window_view` if applicable, -* `np.lib.stride_tricks.as_strided` for the general case, -* or the `np.ndarray` constructor (``buffer`` is the original array) for a light-weight version. - diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst deleted file mode 100644 index cf08551e28ee..000000000000 --- a/doc/release/upcoming_changes/29030.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -* NumPy's C extension modules have begun to use multi-phase initialisation, - as defined by :pep:`489`. As part of this, a new explicit check has been added - that each such module is only imported once per Python process. This comes with - the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing - it will now fail with an ``ImportError``. This has always been unsafe, with - unexpected side-effects, though did not previously raise an error. 
diff --git a/doc/release/upcoming_changes/29052.deprecation.rst b/doc/release/upcoming_changes/29052.deprecation.rst deleted file mode 100644 index e302907abfba..000000000000 --- a/doc/release/upcoming_changes/29052.deprecation.rst +++ /dev/null @@ -1,10 +0,0 @@ -Positional ``out`` argument to `np.maximum`, `np.minimum` is deprecated ------------------------------------------------------------------------ -Passing the output array ``out`` positionally to `numpy.maximum` and -`numpy.minimum` is deprecated. For example, ``np.maximum(a, b, c)`` will -emit a deprecation warning, since ``c`` is treated as the output buffer -rather than a third input. - -Always pass the output with the keyword form, e.g. -``np.maximum(a, b, out=c)``. This makes intent clear and simplifies -type annotations. diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst deleted file mode 100644 index 1561da7bf94e..000000000000 --- a/doc/release/upcoming_changes/29060.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Multiplication between a string and integer now raises OverflowError instead - of MemoryError if the result of the multiplication would create a string that - is too large to be represented. This follows Python's behavior. diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst deleted file mode 100644 index 961ee6504dae..000000000000 --- a/doc/release/upcoming_changes/29094.compatibility.rst +++ /dev/null @@ -1,7 +0,0 @@ -The Macro NPY_ALIGNMENT_REQUIRED has been removed -------------------------------------------------- -The macro was defined in the `npy_cpu.h` file, so might be regarded as -semipublic. As it turns out, with modern compilers and hardware it is almost -always the case that alignment is required, so numpy no longer uses the macro. -It is unlikely anyone uses it, but you might want to compile with the `-Wundef` -flag or equivalent to be sure. 
diff --git a/doc/release/upcoming_changes/29105.change.rst b/doc/release/upcoming_changes/29105.change.rst deleted file mode 100644 index b5d4a9838f30..000000000000 --- a/doc/release/upcoming_changes/29105.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit floating point input data has been improved. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29112.improvement.rst b/doc/release/upcoming_changes/29112.improvement.rst deleted file mode 100644 index 01baa668b9fe..000000000000 --- a/doc/release/upcoming_changes/29112.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Improved error message for `assert_array_compare` -------------------------------------------------- -The error message generated by `assert_array_compare` which is used by functions -like `assert_allclose`, `assert_array_less` etc. now also includes information -about the indices at which the assertion fails. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29129.enhancement.rst b/doc/release/upcoming_changes/29129.enhancement.rst deleted file mode 100644 index 9a14f13c1f4a..000000000000 --- a/doc/release/upcoming_changes/29129.enhancement.rst +++ /dev/null @@ -1,7 +0,0 @@ -``'same_value'`` for casting by value -------------------------------------- -The ``casting`` kwarg now has a ``'same_value'`` option that checks the actual -values can be round-trip cast without changing value. Currently it is only -implemented in `ndarray.astype`. This will raise a ``ValueError`` if any of the -values in the array would change as a result of the cast, including rounding of -floats or overflowing of ints. diff --git a/doc/release/upcoming_changes/29137.compatibility.rst b/doc/release/upcoming_changes/29137.compatibility.rst deleted file mode 100644 index 3ac9da2a4c48..000000000000 --- a/doc/release/upcoming_changes/29137.compatibility.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.round` now always returns a copy. 
Previously, it returned a view - for integer inputs for ``decimals >= 0`` and a copy in all other cases. - This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``. diff --git a/doc/release/upcoming_changes/29165.performance.rst b/doc/release/upcoming_changes/29165.performance.rst deleted file mode 100644 index 4e1a9a4ecdbc..000000000000 --- a/doc/release/upcoming_changes/29165.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Rewrite of `np.ndindex` using `itertools.product` --------------------------------------------------- -The `numpy.ndindex` function now uses `itertools.product` internally, -providing significant improvements in performance for large iteration spaces, -while maintaining the original behavior and interface. -For example, for an array of shape (50, 60, 90) the NumPy `ndindex` -benchmark improves performance by a factor 5.2. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst deleted file mode 100644 index 12eb6804d3dd..000000000000 --- a/doc/release/upcoming_changes/29179.change.rst +++ /dev/null @@ -1,4 +0,0 @@ -Fix bug in ``matmul`` for non-contiguous out kwarg parameter ------------------------------------------------------------- -In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause -memory corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. diff --git a/doc/release/upcoming_changes/29240.new_feature.rst b/doc/release/upcoming_changes/29240.new_feature.rst deleted file mode 100644 index 02d43364b200..000000000000 --- a/doc/release/upcoming_changes/29240.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* Let ``np.size`` accept multiple axes. 
diff --git a/doc/release/upcoming_changes/29244.deprecation.rst b/doc/release/upcoming_changes/29244.deprecation.rst new file mode 100644 index 000000000000..cec845302901 --- /dev/null +++ b/doc/release/upcoming_changes/29244.deprecation.rst @@ -0,0 +1,6 @@ +Setting the ``dtype`` attribute is deprecated +--------------------------------------------- +Setting the dtype attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a view with a new dtype +via `array.view(dtype=new_dtype)`. diff --git a/doc/release/upcoming_changes/29273.new_feature.rst b/doc/release/upcoming_changes/29273.new_feature.rst deleted file mode 100644 index 3e380ca0dbe6..000000000000 --- a/doc/release/upcoming_changes/29273.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. diff --git a/doc/release/upcoming_changes/29301.deprecation.rst b/doc/release/upcoming_changes/29301.deprecation.rst deleted file mode 100644 index e520b692458d..000000000000 --- a/doc/release/upcoming_changes/29301.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -``align=`` must be passed as boolean to ``np.dtype()`` ------------------------------------------------------- -When creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be -given if ``align=`` is not a boolean. -This is mainly to prevent accidentally passing a subarray align flag where it -has no effect, such as ``np.dtype("f8", 3)`` instead of ``np.dtype(("f8", 3))``. -We strongly suggest to always pass ``align=`` as a keyword argument. 
diff --git a/doc/release/upcoming_changes/29338.change.rst b/doc/release/upcoming_changes/29338.change.rst deleted file mode 100644 index 64bf188009c8..000000000000 --- a/doc/release/upcoming_changes/29338.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -``__array_interface__`` with NULL pointer changed -------------------------------------------------- -The array interface now accepts NULL pointers (NumPy will do -its own dummy allocation, though). -Previously, these incorrectly triggered an undocumented -scalar path. -In the unlikely event that the scalar path was actually desired, -you can (for now) achieve the previous behavior via the correct -scalar path by not providing a ``data`` field at all. diff --git a/doc/release/upcoming_changes/29396.improvement.rst b/doc/release/upcoming_changes/29396.improvement.rst deleted file mode 100644 index 2cd3d81ad9d8..000000000000 --- a/doc/release/upcoming_changes/29396.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Show unit information in ``__repr__`` for ``datetime64("NaT")`` ------------------------------------------------------------------- -When a `datetime64` object is "Not a Time" (NaT), its ``__repr__`` method now -includes the time unit of the datetime64 type. This makes it consistent with -the behavior of a `timedelta64` object. diff --git a/doc/release/upcoming_changes/29423.new_feature.rst b/doc/release/upcoming_changes/29423.new_feature.rst deleted file mode 100644 index 7e83604b0049..000000000000 --- a/doc/release/upcoming_changes/29423.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -``StringDType`` fill_value support in `numpy.ma.MaskedArray` ------------------------------------------------------------- -Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` when -using the variable‑width ``StringDType`` (kind ``'T'``), including through slicing -and views. The default is ``'N/A'`` and may be overridden by any valid string. 
-This fixes issue `gh‑29421 `__ and was -implemented in pull request `gh‑29423 `__. diff --git a/doc/release/upcoming_changes/29536.deprecation.rst b/doc/release/upcoming_changes/29536.deprecation.rst new file mode 100644 index 000000000000..7367a135cdd1 --- /dev/null +++ b/doc/release/upcoming_changes/29536.deprecation.rst @@ -0,0 +1,11 @@ +Setting the ``shape`` attribute is deprecated +--------------------------------------------- +Setting the shape attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view via +`np.reshape` or `np.ndarray.reshape`. For example: ``x = np.arange(15); x = np.reshape(x, (3, 5))``. +To ensure no copy is made from the data, one can use ``np.reshape(..., copy=False)``. + +Directly setting the shape on an array is discouraged, but for cases where it is difficult to work +around, e.g., in ``__array_finalize__``, it is possible with the private method `np.ndarray._set_shape`. + diff --git a/doc/release/upcoming_changes/29537.change.rst b/doc/release/upcoming_changes/29537.change.rst deleted file mode 100644 index 63abbbb5a347..000000000000 --- a/doc/release/upcoming_changes/29537.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` for complex dtypes may return unsorted data -------------------------------------------------------------- -np.unique now supports hash‐based duplicate removal for complex dtypes. -This enhancement extends the hash‐table algorithm -to all complex types ('c'), and their extended precision variants. -The hash‐based method provides faster extraction of unique values -but does not guarantee that the result will be sorted. 
diff --git a/doc/release/upcoming_changes/29537.performance.rst b/doc/release/upcoming_changes/29537.performance.rst deleted file mode 100644 index 8c78dc202a2e..000000000000 --- a/doc/release/upcoming_changes/29537.performance.rst +++ /dev/null @@ -1,9 +0,0 @@ -Performance improvements to ``np.unique`` for complex dtypes ------------------------------------------------------------- -The hash-based algorithm for unique extraction now also supports -complex dtypes, offering noticeable performance gains. - -In our benchmarks on complex128 arrays with 200,000 elements, -the hash-based approach was about 1.4–1.5× faster -than the sort-based baseline when there were 20% of unique values, -and about 5× faster when there were 0.2% of unique values. diff --git a/doc/release/upcoming_changes/29550.deprecation.rst b/doc/release/upcoming_changes/29550.deprecation.rst deleted file mode 100644 index ce35477c5010..000000000000 --- a/doc/release/upcoming_changes/29550.deprecation.rst +++ /dev/null @@ -1,6 +0,0 @@ -Assertion and warning control utilities are deprecated ------------------------------------------------------- - -`np.testing.assert_warns` and `np.testing.suppress_warnings` are deprecated. -Use `warnings.catch_warnings`, `warnings.filterwarnings`, ``pytest.warns``, or -``pytest.filterwarnings`` instead. diff --git a/doc/release/upcoming_changes/29569.new_feature.rst b/doc/release/upcoming_changes/29569.new_feature.rst deleted file mode 100644 index ac014c07c7a0..000000000000 --- a/doc/release/upcoming_changes/29569.new_feature.rst +++ /dev/null @@ -1,27 +0,0 @@ -``ndmax`` option for `numpy.array` ----------------------------------------------------- -The ``ndmax`` option is now available for `numpy.array`. -It explicitly limits the maximum number of dimensions created from nested sequences. - -This is particularly useful when creating arrays of list-like objects with ``dtype=object``. 
-By default, NumPy recurses through all nesting levels to create the highest possible -dimensional array, but this behavior may not be desired when the intent is to preserve -nested structures as objects. The ``ndmax`` parameter provides explicit control over -this recursion depth. - -.. code-block:: python - - # Default behavior: Creates a 2D array - >>> a = np.array([[1, 2], [3, 4]], dtype=object) - >>> a - array([[1, 2], - [3, 4]], dtype=object) - >>> a.shape - (2, 2) - - # With ndmax=1: Creates a 1D array - >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) - >>> b - array([list([1, 2]), list([3, 4])], dtype=object) - >>> b.shape - (2,) diff --git a/doc/release/upcoming_changes/29619.deprecation.rst b/doc/release/upcoming_changes/29619.deprecation.rst new file mode 100644 index 000000000000..0c2e651f14b3 --- /dev/null +++ b/doc/release/upcoming_changes/29619.deprecation.rst @@ -0,0 +1,17 @@ +Deprecation of the ``generic`` unit in `numpy.timedelta64` +---------------------------------------------------------- + +Using the ``generic`` unit in `numpy.timedelta64` is now deprecated +since this can lead to unexpected behavior such as non-transitive comparisons. +(see `gh-28287 `__ for details). + +As an alternative, please specify an explicit unit such as ``'s'`` (seconds) +or ``'D'`` (days) when constructing `numpy.timedelta64`. + +Due to this change, operations that implicitly rely on the ``generic`` unit are also deprecated. 
+For example:: + + arr = np.array([1, 2, 3], dtype="m8[s]") + + # `1` is implicitly converted to generic timedelta64 + arr + 1 diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst deleted file mode 100644 index 65c804ef829b..000000000000 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ /dev/null @@ -1,13 +0,0 @@ -The NPY_SORTKIND enum has been enhanced with new variables ----------------------------------------------------------- -This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. -We have changed the semantics of the old names in the NPY_SORTKIND enum and -added new ones. The changes are backward compatible, and no recompilation is -needed. The new names of interest are: - -* NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT) -* NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT) -* NPY_SORT_DESCENDING -- the sort must be descending - -The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. -Note that NPY_SORT_DESCENDING is not yet implemented. diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst deleted file mode 100644 index 4a1706e00bab..000000000000 --- a/doc/release/upcoming_changes/29642.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` ------------------------------------------------------------- -It is unlikely that this change will be noticed, but if you do see a change in -execution time or unstable argsort order, that is likely the cause. Please let -us know if there is a performance regression. 
Congratulate us if it is -improved :) - diff --git a/doc/release/upcoming_changes/29739.change.rst b/doc/release/upcoming_changes/29739.change.rst deleted file mode 100644 index 5d1316a1ba41..000000000000 --- a/doc/release/upcoming_changes/29739.change.rst +++ /dev/null @@ -1,15 +0,0 @@ -``numpy.typing.DTypeLike`` no longer accepts ``None`` ------------------------------------------------------ -The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of - -.. code-block:: python - - dtype: DTypeLike = None - -it should now be - -.. code-block:: python - - dtype: DTypeLike | None = None - -instead. diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst deleted file mode 100644 index 2759c08d8349..000000000000 --- a/doc/release/upcoming_changes/29750.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a -``.a`` file extension on win-arm64, for compatibility for building with MSVC and -``setuptools``. Please note that using these static libraries is discouraged -and for existing projects using it, it's best to use it with a matching -compiler toolchain, which is ``clang-cl`` on Windows on Arm. diff --git a/doc/release/upcoming_changes/29813.new_feature.rst b/doc/release/upcoming_changes/29813.new_feature.rst deleted file mode 100644 index 690d7ca88799..000000000000 --- a/doc/release/upcoming_changes/29813.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -Warning emitted when using `where` without `out` ------------------------------------------------- -Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will -now emit a warning. This usage tends to trip up users who expect some value in -output locations where the mask is ``False`` (the ufunc will not touch those -locations). The warning can be supressed by using ``out=None``. 
diff --git a/doc/release/upcoming_changes/29819.improvement.rst b/doc/release/upcoming_changes/29819.improvement.rst deleted file mode 100644 index fa4ac07f2a08..000000000000 --- a/doc/release/upcoming_changes/29819.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Performance increase for scalar calculations --------------------------------------------- -The speed of calculations on scalars has been improved by about a factor 6 for -ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed -difference from their ``math`` equivalents from a factor 19 to 3 (the speed -for arrays is left unchanged). diff --git a/doc/release/upcoming_changes/29836.c_api.rst b/doc/release/upcoming_changes/29836.c_api.rst deleted file mode 100644 index 9ac5478c742a..000000000000 --- a/doc/release/upcoming_changes/29836.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -New ``NPY_DT_get_constant`` slot for DType constant retrieval -------------------------------------------------------------- -A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing -dtype implementations to provide constant values such as machine limits and -special values. The slot function has the signature:: - - int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) - -It returns 1 on success, 0 if the constant is not available, or -1 on error. -The function is always called with the GIL held and may write to unaligned memory. - -Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, -while floating-point constants return values of the dtype's native type. - -Implementing this can be used by user DTypes to provide `numpy.finfo` values. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29836.improvement.rst b/doc/release/upcoming_changes/29836.improvement.rst deleted file mode 100644 index 0d7df429d125..000000000000 --- a/doc/release/upcoming_changes/29836.improvement.rst +++ /dev/null @@ -1,26 +0,0 @@ -``numpy.finfo`` Refactor ------------------------- -The ``numpy.finfo`` class has been completely refactored to obtain floating-point -constants directly from C compiler macros rather than deriving them at runtime. -This provides better accuracy, platform compatibility and corrected -several attribute calculations: - -* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and - ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, - ``DBL_MIN``, etc.), ensuring platform-correct values. - -* The deprecated ``MachAr`` runtime discovery mechanism has been removed. - -* Derived attributes have been corrected to match standard definitions: - ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for - all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp`` - follows the C standard definition. - -* longdouble constants, Specifically ``smallest_normal`` now follows the - C standard definitions as per respecitive platform. - -* Special handling added for PowerPC's IBM double-double format. - -* New test suite added in ``test_finfo.py`` to validate all - ``finfo`` properties against expected machine arithmetic values for - float16, float32, and float64 types. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29841.expired.rst b/doc/release/upcoming_changes/29841.expired.rst deleted file mode 100644 index 34977cec2f70..000000000000 --- a/doc/release/upcoming_changes/29841.expired.rst +++ /dev/null @@ -1,6 +0,0 @@ -Raise `TypeError` on attempt to convert array with `ndim > 0` to scalar ------------------------------------------------------------------------ -Conversion of an array with `ndim > 0` to a scalar was deprecated in -NumPy 1.25. Now, attempting to do so raises `TypeError`. -Ensure you extract a single element from your array before performing -this operation. diff --git a/doc/release/upcoming_changes/29900.c_api.rst b/doc/release/upcoming_changes/29900.c_api.rst deleted file mode 100644 index b29014ac95fc..000000000000 --- a/doc/release/upcoming_changes/29900.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -A new `PyUFunc_AddLoopsFromSpecs` convenience function has been added to the C API. ------------------------------------------------------------------------------------ -This function allows adding multiple ufunc loops from their specs in one call using -a NULL-terminated array of `PyUFunc_LoopSlot` structs. It allows registering -sorting and argsorting loops using the new ArrayMethod API. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29900.new_feature.rst b/doc/release/upcoming_changes/29900.new_feature.rst deleted file mode 100644 index 1799b6043e29..000000000000 --- a/doc/release/upcoming_changes/29900.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -DType sorting and argsorting supports the ArrayMethod API ---------------------------------------------------------- -User-defined dtypes can now implement custom sorting and argsorting using -the ArrayMethod API. This mechanism can be used in place of the `PyArray_ArrFuncs` -slots which may be deprecated in the future. 
- -The sorting and argsorting methods are registered by passing the arraymethod -specs that implement the operations to the new `PyUFunc_AddLoopsFromSpecs` function. -See the ArrayMethod API documentation for details. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst deleted file mode 100644 index 6a2ee4f53c09..000000000000 --- a/doc/release/upcoming_changes/29909.expired.rst +++ /dev/null @@ -1,10 +0,0 @@ -Remove numpy.linalg.linalg and numpy.fft.helper ------------------------------------------------ - -The following were deprecated in NumPy 2.0 and have been moved to private modules - -* ``numpy.linalg.linalg`` - Use :mod:`numpy.linalg` instead. - -* ``numpy.fft.helper`` - Use :mod:`numpy.fft` instead. diff --git a/doc/release/upcoming_changes/29929.improvement.rst b/doc/release/upcoming_changes/29929.improvement.rst new file mode 100644 index 000000000000..633d8768a03f --- /dev/null +++ b/doc/release/upcoming_changes/29929.improvement.rst @@ -0,0 +1,19 @@ +For ``f2py``, the behaviour of ``intent(inplace)`` has improved. +Previously, if an input array did not have the right dtype or order, +the input array was modified in-place, changing its dtype and +replacing its data by a corrected copy. Now, instead, the corrected +copy is kept a separate array, which, after being passed and +presumably modified by the fortran routine, is copied back to the +input routine. The above means one no longer has the risk that +pre-existing views or slices of the input array start pointing to +unallocated memory (at the price of increased overhead for the +write-back copy at the end of the call). + +A potential problem would be that one might get very different results +if one, e.g., previously passed in an integer array where a double +array was expected: the writeback to integer would likely give wrong +results. 
To avoid such situations, ``intent(inplace)`` will now only +allow arrays that have equivalent type to that used in the fortran +routine, i.e., ``dtype.kind`` is the same. For instance, a routine +expecting double would be able to receive float, but would raise on +integer input. diff --git a/doc/release/upcoming_changes/29947.improvement.rst b/doc/release/upcoming_changes/29947.improvement.rst deleted file mode 100644 index 99c67e598347..000000000000 --- a/doc/release/upcoming_changes/29947.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Multiple axes are now supported in ``numpy.trim_zeros`` -------------------------------------------------------- -The ``axis`` argument of `numpy.trim_zeros` now accepts a sequence; for example -``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional -array ``x`` along axes 0 and 1. This fixes issue -`gh‑29945 `__ and was implemented -in pull request `gh‑29947 `__. diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst deleted file mode 100644 index 5b51cb7cf428..000000000000 --- a/doc/release/upcoming_changes/29973.expired.rst +++ /dev/null @@ -1,12 +0,0 @@ -Remove ``interpolation`` parameter from quantile and percentile functions -------------------------------------------------------------------------- - -The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been -removed from the following functions: - -* ``numpy.percentile`` -* ``numpy.nanpercentile`` -* ``numpy.quantile`` -* ``numpy.nanquantile`` - -Use the ``method`` parameter instead. diff --git a/doc/release/upcoming_changes/29978.expired.rst b/doc/release/upcoming_changes/29978.expired.rst deleted file mode 100644 index e0f4de1d8715..000000000000 --- a/doc/release/upcoming_changes/29978.expired.rst +++ /dev/null @@ -1,4 +0,0 @@ -Removed ``numpy.in1d`` ----------------------- - -``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. 
diff --git a/doc/release/upcoming_changes/29980.expired.rst b/doc/release/upcoming_changes/29980.expired.rst deleted file mode 100644 index 563ba8aa6929..000000000000 --- a/doc/release/upcoming_changes/29980.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Removed ``numpy.ndindex.ndincr()`` ----------------------------------- - -The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now removed; -use ``next(ndindex)`` instead. diff --git a/doc/release/upcoming_changes/29984.expired.rst b/doc/release/upcoming_changes/29984.expired.rst deleted file mode 100644 index bcce0dedd4a7..000000000000 --- a/doc/release/upcoming_changes/29984.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Removed ``fix_imports`` parameter from ``numpy.save`` ------------------------------------------------------ - -The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. -This flag has been ignored since NumPy 1.17 and was only needed to support loading files in Python 2 that were written in Python 3. diff --git a/doc/release/upcoming_changes/29986.expired.rst b/doc/release/upcoming_changes/29986.expired.rst deleted file mode 100644 index 2a6b44380dd4..000000000000 --- a/doc/release/upcoming_changes/29986.expired.rst +++ /dev/null @@ -1,10 +0,0 @@ -Removal of four undocumented ``ndarray.ctypes`` methods -------------------------------------------------------- -Four undocumented methods of the ``ndarray.ctypes`` object have been removed: - -* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) -* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) -* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) -* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) - -These methods have been deprecated since NumPy 1.21. 
diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst deleted file mode 100644 index 11331da6e810..000000000000 --- a/doc/release/upcoming_changes/29994.expired.rst +++ /dev/null @@ -1,6 +0,0 @@ -Remove ``newshape`` parameter from ``numpy.reshape`` ----------------------------------------------------- - -The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been -removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` -on newer NumPy versions. diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst deleted file mode 100644 index 6bdfa792e4e6..000000000000 --- a/doc/release/upcoming_changes/29997.expired.rst +++ /dev/null @@ -1,9 +0,0 @@ -Removal of deprecated functions and arguments ---------------------------------------------- - -The following long-deprecated APIs have been removed: - -* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or - ``scipy.integrate`` functions instead. -* ``disp`` function — deprecated from 2.0 release and no longer functional. Use your own printing function instead. -* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. diff --git a/doc/release/upcoming_changes/30021.expired.rst b/doc/release/upcoming_changes/30021.expired.rst deleted file mode 100644 index 31ca300ce35f..000000000000 --- a/doc/release/upcoming_changes/30021.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Remove ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` ------------------------------------------------------------------------- - -The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been -removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. 
diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst deleted file mode 100644 index 5d41c98b3260..000000000000 --- a/doc/release/upcoming_changes/30068.expired.rst +++ /dev/null @@ -1,12 +0,0 @@ -``numpy.array2string`` and ``numpy.sum`` deprecations finalized ---------------------------------------------------------------- - -The following long-deprecated APIs have been removed or converted to errors: - -* The ``style`` parameter has been removed from ``numpy.array2string``. - This argument had no effect since Numpy 1.14.0. Any arguments following - it, such as ``formatter`` have now been made keyword-only. - -* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. - This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` - or the python ``sum`` builtin instead. diff --git a/doc/release/upcoming_changes/30147.compatibility.rst b/doc/release/upcoming_changes/30147.compatibility.rst deleted file mode 100644 index c5d13323fe6e..000000000000 --- a/doc/release/upcoming_changes/30147.compatibility.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Type-checkers will no longer accept calls to `numpy.arange` with - ``start`` as a keyword argument. This was done for compatibility with - the Array API standard. At runtime it is still possible to use - `numpy.arange` with ``start`` as a keyword argument. diff --git a/doc/release/upcoming_changes/30168.deprecation.rst b/doc/release/upcoming_changes/30168.deprecation.rst deleted file mode 100644 index 81673397590d..000000000000 --- a/doc/release/upcoming_changes/30168.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -``np.fix`` is pending deprecation ---------------------------------- -The `numpy.fix` function will be deprecated in a future release. It is recommended to use -`numpy.trunc` instead, as it provides the same functionality of truncating decimal values to their -integer parts. 
Static type checkers might already report a warning for the use of `numpy.fix`. diff --git a/doc/release/upcoming_changes/30179.new_feature.rst b/doc/release/upcoming_changes/30179.new_feature.rst deleted file mode 100644 index e19815289351..000000000000 --- a/doc/release/upcoming_changes/30179.new_feature.rst +++ /dev/null @@ -1,13 +0,0 @@ -New ``__numpy_dtype__`` protocol --------------------------------- -NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check -for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` -or any ``dtype=`` argument. - -Downstream projects are encouraged to implement this for all dtype like -objects which may previously have used a ``.dtype`` attribute that returned -a NumPy dtype. -We expect to deprecate ``.dtype`` in the future to prevent interpreting -array-like objects with a ``.dtype`` attribute as a dtype. -If you wish you can implement ``__numpy_dtype__`` to ensure an earlier -warning or error (``.dtype`` is ignored if this is found). diff --git a/doc/release/upcoming_changes/30181.deprecation.rst b/doc/release/upcoming_changes/30181.deprecation.rst new file mode 100644 index 000000000000..c9ca61dd67f9 --- /dev/null +++ b/doc/release/upcoming_changes/30181.deprecation.rst @@ -0,0 +1,6 @@ +Resizing a Numpy array inplace is deprecated +-------------------------------------------- +Resizing a Numpy array inplace is deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a resized array via ``np.resize``. + diff --git a/doc/release/upcoming_changes/30208.highlight.rst b/doc/release/upcoming_changes/30208.highlight.rst deleted file mode 100644 index c13c46c056cc..000000000000 --- a/doc/release/upcoming_changes/30208.highlight.rst +++ /dev/null @@ -1,2 +0,0 @@ -* Runtime signature introspection support has been significantly improved. See the - corresponding improvement note for details. 
diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst deleted file mode 100644 index ad9faaedfb6b..000000000000 --- a/doc/release/upcoming_changes/30208.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -Runtime signature introspection support has been significantly improved ------------------------------------------------------------------------ -Many NumPy functions, classes, and methods that previously raised ``ValueError`` when passed -to ``inspect.signature()`` now return meaningful signatures. This improves support for runtime type -checking, IDE autocomplete, documentation generation, and runtime introspection capabilities across -the NumPy API. - -Over three hundred classes and functions have been updated in total, including, but not limited to, -core classes such as `ndarray`, `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., -most methods of `ndarray` and scalar types, array constructor functions (`array`, `empty`, -`arange`, `fromiter`, etc.), all :ref:`ufuncs`, and many other commonly used functions, including -`dot`, `concat`, `where`, `bincount`, `can_cast`, and numerous others. diff --git a/doc/release/upcoming_changes/30282.deprecation.rst b/doc/release/upcoming_changes/30282.deprecation.rst deleted file mode 100644 index e9aac9ae17d5..000000000000 --- a/doc/release/upcoming_changes/30282.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -in-place modification of ``ndarray.shape`` is pending deprecation ------------------------------------------------------------------ -Setting the `ndarray.shape` attribute directly will be deprecated in a future release. -Instead of modifying the shape in place, it is recommended to use the `numpy.reshape` function. -Static type checkers might already report a warning for assignments to `ndarray.shape`. 
diff --git a/doc/release/upcoming_changes/30284.deprecation.rst b/doc/release/upcoming_changes/30284.deprecation.rst deleted file mode 100644 index 8803f3225cd3..000000000000 --- a/doc/release/upcoming_changes/30284.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deprecation of ``numpy.lib.user_array.container`` -------------------------------------------------- -The ``numpy.lib.user_array.container`` class is deprecated and will be removed in a future version. diff --git a/doc/release/upcoming_changes/30340.expired.rst b/doc/release/upcoming_changes/30340.expired.rst new file mode 100644 index 000000000000..79dd57dde737 --- /dev/null +++ b/doc/release/upcoming_changes/30340.expired.rst @@ -0,0 +1 @@ +* ``numpy.distutils`` has been removed diff --git a/doc/release/upcoming_changes/30381.new_feature.rst b/doc/release/upcoming_changes/30381.new_feature.rst new file mode 100644 index 000000000000..8dc3ce34e3bc --- /dev/null +++ b/doc/release/upcoming_changes/30381.new_feature.rst @@ -0,0 +1,11 @@ +Pixi package definitions +------------------------ +Pixi package definitions have been added for different kinds +of from-source builds of NumPy. These can be used in +downstream Pixi workspaces via the ``pixi-build`` feature. + +Definitions for both ``default`` and AddressSanitizer-instrumented +(``asan``) builds are available in the source code under the +``pixi-packages/`` directory. + +``linux-64`` and ``osx-arm64`` platforms are supported. 
diff --git a/doc/release/upcoming_changes/30411.compatibility.rst b/doc/release/upcoming_changes/30411.compatibility.rst new file mode 100644 index 000000000000..54ba1b1fd32d --- /dev/null +++ b/doc/release/upcoming_changes/30411.compatibility.rst @@ -0,0 +1,18 @@ +``linalg.eig`` and ``linalg.eigvals`` now always return complex arrays +---------------------------------------------------------------------- + +Previously, the return values depended on whether the eigenvalues happened to lie +on the real line (which, for a general, non-symmetric matrix, is not guaranteed). + +The change makes consistent what was a value-dependent result. To retain the +previous behavior, do:: + + w = eigvals(a) + if np.all(w.imag == 0): # this is what NumPy used to do + w = w.real + +If your matrix is symmetric/hermitian, use ``eigh`` and ``eigvalsh`` instead of +``eig`` and ``eigvals``. These are guaranteed to return real values. A common +case is covariance matrices, which are symmetric and positive semi-definite by +construction. 
+ diff --git a/doc/release/upcoming_changes/30460.expired.rst b/doc/release/upcoming_changes/30460.expired.rst new file mode 100644 index 000000000000..5fb6bf470866 --- /dev/null +++ b/doc/release/upcoming_changes/30460.expired.rst @@ -0,0 +1 @@ +* Passing ``None`` as dtype to ``np.finfo`` will now raise a ``TypeError`` (deprecated since 1.25) diff --git a/doc/release/upcoming_changes/30461.expired.rst b/doc/release/upcoming_changes/30461.expired.rst new file mode 100644 index 000000000000..e9d05eda1b7b --- /dev/null +++ b/doc/release/upcoming_changes/30461.expired.rst @@ -0,0 +1 @@ +* ``numpy.cross`` no longer supports 2-dimensional vectors (deprecated since 2.0) diff --git a/doc/release/upcoming_changes/30462.expired.rst b/doc/release/upcoming_changes/30462.expired.rst new file mode 100644 index 000000000000..ee8b62796640 --- /dev/null +++ b/doc/release/upcoming_changes/30462.expired.rst @@ -0,0 +1 @@ +* ``numpy._core.numerictypes.maximum_sctype`` has been removed (deprecated since 2.0) diff --git a/doc/release/upcoming_changes/30463.expired.rst b/doc/release/upcoming_changes/30463.expired.rst new file mode 100644 index 000000000000..232448966104 --- /dev/null +++ b/doc/release/upcoming_changes/30463.expired.rst @@ -0,0 +1,2 @@ +* ``numpy.row_stack`` has been removed in favor of ``numpy.vstack`` (deprecated since 2.0). +* ``get_array_wrap`` has been removed (deprecated since 2.0). diff --git a/doc/release/upcoming_changes/30467.expired.rst b/doc/release/upcoming_changes/30467.expired.rst new file mode 100644 index 000000000000..3474787b2e1f --- /dev/null +++ b/doc/release/upcoming_changes/30467.expired.rst @@ -0,0 +1 @@ +* ``recfromtxt`` and ``recfromcsv`` have been removed from ``numpy.lib._npyio`` in favor of ``numpy.genfromtxt`` (deprecated since 2.0). 
diff --git a/doc/release/upcoming_changes/30480.typing.rst b/doc/release/upcoming_changes/30480.typing.rst new file mode 100644 index 000000000000..fa27a8ecbe37 --- /dev/null +++ b/doc/release/upcoming_changes/30480.typing.rst @@ -0,0 +1,8 @@ +``numpy.linalg`` typing improvements and preliminary shape-typing support +------------------------------------------------------------------------- +Input and output dtypes for ``numpy.linalg`` functions are now more precise. Several of these +functions also gain preliminary shape-typing support while remaining backward compatible. +For example, the return type of ``numpy.linalg.matmul`` now depends on the shape-type of its inputs, +or fall back to the backward-compatible return type if the shape-types are unknown at type-checking +time. Because of limitations in Python's type system and current type-checkers, shape-typing cannot +cover every situation and is often only implemented for the most common lower-rank cases. diff --git a/doc/release/upcoming_changes/30489.compatibility.rst b/doc/release/upcoming_changes/30489.compatibility.rst new file mode 100644 index 000000000000..6eb1387fab6b --- /dev/null +++ b/doc/release/upcoming_changes/30489.compatibility.rst @@ -0,0 +1,5 @@ +MSVC support +------------ +NumPy now requires minimum MSVC 19.35 toolchain version on +Windows platforms. This corresponds to Visual Studio 2022 +version 17.5 Preview 2 or newer. \ No newline at end of file diff --git a/doc/release/upcoming_changes/30517.performance.rst b/doc/release/upcoming_changes/30517.performance.rst new file mode 100644 index 000000000000..df15498f7470 --- /dev/null +++ b/doc/release/upcoming_changes/30517.performance.rst @@ -0,0 +1,8 @@ +Improved performance of ``numpy.searchsorted`` +---------------------------------------------- +The C++ binary search implementation used by ``numpy.searchsorted`` now has +a much better performance when searching for multiple keys. 
The new +implementation batches binary search steps across all keys to leverage cache +locality and out-of-order execution. Benchmarks show the new implementation can +be up to 20 times faster for hundreds of thousands keys while single-key +performance remains comparable to previous versions. \ No newline at end of file diff --git a/doc/release/upcoming_changes/30538.change.rst b/doc/release/upcoming_changes/30538.change.rst new file mode 100644 index 000000000000..8372e94b9bd8 --- /dev/null +++ b/doc/release/upcoming_changes/30538.change.rst @@ -0,0 +1,18 @@ +``numpy.ctypeslib.as_ctypes`` now does not support scalar types +---------------------------------------------------------------- +The function ``numpy.ctypeslib.as_ctypes`` has been updated to only accept ``numpy.ndarray``. +Passing a scalar type (e.g., ``numpy.int32(5)``) will now raise a ``TypeError``. +This change was made to avoid the issue `gh-30354 `__ +and to enforce the readonly nature of scalar types in NumPy. +The previous behavior relied on undocumented implicit temporary arrays and was not well-defined. +Users who need to convert scalar types to ctypes should first convert them to an array +(e.g., ``numpy.asarray``) before passing them to ``numpy.ctypeslib.as_ctypes``. + + +``__array_interface__`` changes on scalars +------------------------------------------ +Scalars now export the ``__array_interface__`` directly rather than including +an array copy as a ``__ref`` entry. This means that scalars are now exported +as read-only while they previously exported as writeable. +The path via ``__ref`` was undocumented and not consistently used even +within NumPy itself. 
diff --git a/doc/release/upcoming_changes/30566.typing.rst b/doc/release/upcoming_changes/30566.typing.rst new file mode 100644 index 000000000000..fd9aabf85b6f --- /dev/null +++ b/doc/release/upcoming_changes/30566.typing.rst @@ -0,0 +1,5 @@ +``numpy.ma`` typing annotations +------------------------------- +The ``numpy.ma`` module is now fully covered by typing annotations. +This includes annotations for masked arrays, masks, and various functions and methods. +With this, NumPy has achieved 100% typing coverage across all its submodules. diff --git a/doc/release/upcoming_changes/30604.expired.rst b/doc/release/upcoming_changes/30604.expired.rst new file mode 100644 index 000000000000..50cab89f3c3a --- /dev/null +++ b/doc/release/upcoming_changes/30604.expired.rst @@ -0,0 +1 @@ +* The ``numpy.chararray`` re-export of ``numpy.char.chararray`` has been removed (deprecated since 2.0). diff --git a/doc/release/upcoming_changes/30605.deprecation.rst b/doc/release/upcoming_changes/30605.deprecation.rst new file mode 100644 index 000000000000..062160f210ef --- /dev/null +++ b/doc/release/upcoming_changes/30605.deprecation.rst @@ -0,0 +1 @@ +* ``numpy.char.chararray`` is deprecated. Use an ``ndarray`` with a string or bytes dtype instead. diff --git a/doc/release/upcoming_changes/30610.expired.rst b/doc/release/upcoming_changes/30610.expired.rst new file mode 100644 index 000000000000..eb806c954b16 --- /dev/null +++ b/doc/release/upcoming_changes/30610.expired.rst @@ -0,0 +1 @@ +* ``bincount`` now raises a ``TypeError`` for non-integer inputs (deprecated since 2.1). diff --git a/doc/release/upcoming_changes/30612.expired.rst b/doc/release/upcoming_changes/30612.expired.rst new file mode 100644 index 000000000000..1e29d3c96d53 --- /dev/null +++ b/doc/release/upcoming_changes/30612.expired.rst @@ -0,0 +1 @@ +* The ``numpy.lib.math`` alias for the standard library ``math`` module has been removed (deprecated since 1.25). 
diff --git a/doc/release/upcoming_changes/30613.expired.rst b/doc/release/upcoming_changes/30613.expired.rst new file mode 100644 index 000000000000..89610f3577e6 --- /dev/null +++ b/doc/release/upcoming_changes/30613.expired.rst @@ -0,0 +1 @@ +* Data type alias ``'a'`` was removed in favor of ``'S'`` (deprecated since 2.0). diff --git a/doc/release/upcoming_changes/30614.expired.rst b/doc/release/upcoming_changes/30614.expired.rst new file mode 100644 index 000000000000..e0d95d2a75fc --- /dev/null +++ b/doc/release/upcoming_changes/30614.expired.rst @@ -0,0 +1 @@ +* ``_add_newdoc_ufunc(ufunc, newdoc)`` has been removed in favor of ``ufunc.__doc__ = newdoc`` (deprecated in 2.2) diff --git a/doc/release/upcoming_changes/30615.deprecation.rst b/doc/release/upcoming_changes/30615.deprecation.rst new file mode 100644 index 000000000000..4f95a3bed811 --- /dev/null +++ b/doc/release/upcoming_changes/30615.deprecation.rst @@ -0,0 +1,7 @@ +* `numpy.take` now correctly checks if the result can be cast to + the provided ``out=out`` under the same-kind rule. + A ``DeprecationWarning`` is given now when this check fails. + Previously, ``take`` incorrectly checked if ``out`` could be cast to + the result (the wrong direction). + This deprecation also affects ``compress`` and possibly other functions. + (Future versions of NumPy may tighten the casting check further.) diff --git a/doc/release/upcoming_changes/30644.deprecation.rst b/doc/release/upcoming_changes/30644.deprecation.rst new file mode 100644 index 000000000000..41219eca7e94 --- /dev/null +++ b/doc/release/upcoming_changes/30644.deprecation.rst @@ -0,0 +1,6 @@ +``numpy.fix`` is deprecated +--------------------------- + +`numpy.fix` is deprecated. Use `numpy.trunc` instead, which is faster +and follows the Array API standard. Both functions provide identical +functionality: rounding array elements towards zero. 
diff --git a/doc/release/upcoming_changes/30653.new_feature.rst b/doc/release/upcoming_changes/30653.new_feature.rst new file mode 100644 index 000000000000..0b79fd25fc77 --- /dev/null +++ b/doc/release/upcoming_changes/30653.new_feature.rst @@ -0,0 +1,6 @@ +``numpy.ndarray`` now supports structural pattern matching +---------------------------------------------------------- +`numpy.ndarray` and its subclasses now have the ``Py_TPFLAGS_SEQUENCE`` flag +set, enabling structural pattern matching (PEP 634) with ``match``/``case`` +statements. This also enables Cython to optimize integer indexing operations. +See :ref:`arrays.ndarray.pattern-matching` for details. diff --git a/doc/release/upcoming_changes/30707.change.rst b/doc/release/upcoming_changes/30707.change.rst new file mode 100644 index 000000000000..7f3846b6de97 --- /dev/null +++ b/doc/release/upcoming_changes/30707.change.rst @@ -0,0 +1,4 @@ +``meshgrid`` now always returns a tuple +--------------------------------------- +``np.meshgrid`` previously used to return a list when ``sparse`` was true and ``copy`` was false. +Now, it always returns a tuple regardless of the arguments. diff --git a/doc/release/upcoming_changes/30738.deprecation.rst b/doc/release/upcoming_changes/30738.deprecation.rst new file mode 100644 index 000000000000..381117ec84cc --- /dev/null +++ b/doc/release/upcoming_changes/30738.deprecation.rst @@ -0,0 +1,4 @@ +``numpy.ma.round_`` is deprecated +--------------------------------- +``numpy.ma.round_`` is deprecated. +``numpy.ma.round`` can be used as a replacement. diff --git a/doc/release/upcoming_changes/30770.compatibility.rst b/doc/release/upcoming_changes/30770.compatibility.rst new file mode 100644 index 000000000000..a1987f77e4c5 --- /dev/null +++ b/doc/release/upcoming_changes/30770.compatibility.rst @@ -0,0 +1,28 @@ +Cython support +-------------- + +NumPy's Cython headers (accessed via ``cimport numpy``) now require +Cython 3.0 or newer to build. 
If you try to compile a project that depends on +NumPy's Cython headers using Cython 0.29 or older, you will see a message like +this: + +:: + + Error compiling Cython file: + ------------------------------------------------------------ + ... + # versions. + # + # See __init__.cython-30.pxd for the real Cython header + # + + DEF err = int('Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.') + ------------------------------------------------------------ + + /path/to/site-packages/numpy/__init__.pxd:11:13: Error in compile-time expression: ValueError: invalid literal for int() with base 10: 'Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.' + + +Note that the invalid integer is not a bug in NumPy - we are intentionally +generating this error to avoid triggering a more obscure error later in the +build when an older Cython version tries to use a Cython feature that was not +available in the old Cython version. diff --git a/doc/release/upcoming_changes/30774.deprecation.rst b/doc/release/upcoming_changes/30774.deprecation.rst new file mode 100644 index 000000000000..00b941ef3a8c --- /dev/null +++ b/doc/release/upcoming_changes/30774.deprecation.rst @@ -0,0 +1,4 @@ +``typename`` is deprecated +-------------------------- +``numpy.typename`` is deprecated because the names returned by it were outdated and inconsistent. +``numpy.dtype.name`` can be used as a replacement. diff --git a/doc/release/upcoming_changes/30802.deprecation.rst b/doc/release/upcoming_changes/30802.deprecation.rst new file mode 100644 index 000000000000..82fe6672b885 --- /dev/null +++ b/doc/release/upcoming_changes/30802.deprecation.rst @@ -0,0 +1 @@ +* The ``numpy.char.[as]array`` functions are deprecated. Use an ``numpy.[as]array`` with a string or bytes dtype instead. 
diff --git a/doc/release/upcoming_changes/30803.compatibility.rst b/doc/release/upcoming_changes/30803.compatibility.rst new file mode 100644 index 000000000000..3fc75cae53ab --- /dev/null +++ b/doc/release/upcoming_changes/30803.compatibility.rst @@ -0,0 +1,8 @@ +``numpy.where`` no longer truncates Python integers +--------------------------------------------------- + +Previously, if the ``x`` or ``y`` argument of ``numpy.where`` was a Python integer +that was out of range of the output type, it would be silently truncated. +Now, an `OverflowError` will be raised instead. + +This change also applies to the underlying C API function ``PyArray_Where``. diff --git a/doc/release/upcoming_changes/30846.compatibility.rst b/doc/release/upcoming_changes/30846.compatibility.rst new file mode 100644 index 000000000000..68a6685f6673 --- /dev/null +++ b/doc/release/upcoming_changes/30846.compatibility.rst @@ -0,0 +1,4 @@ +Default memory allocator change +------------------------------- +NumPy now uses ``PyMem_RawMalloc`` and ``PyMem_RawFree`` as the default memory allocator, +instead of system's ``malloc`` and ``free`` directly. diff --git a/doc/release/upcoming_changes/30846.performance.rst b/doc/release/upcoming_changes/30846.performance.rst new file mode 100644 index 000000000000..39d2d68cbda3 --- /dev/null +++ b/doc/release/upcoming_changes/30846.performance.rst @@ -0,0 +1,18 @@ +Improved scaling of ufuncs on free-threading +-------------------------------------------- + +NumPy's ufuncs now scale significantly better on free-threading builds +of CPython due to the following optimizations: + +* **Lock-free dispatch table:** The ufuncs dispatch table is now + implemented as a lock-free concurrent hash map, allowing multiple threads + to call ufuncs without contention. + +* **Immortal shared objects:** Certain shared objects, such as global memory + handlers, have been made immortal. This effectively reduces reference + counting contention across threads. 
+ +* **Optimized memory allocation:** NumPy now utilizes ``PyMem_RawMalloc`` and + ``PyMem_RawFree`` for memory allocation. On Python 3.15 and newer, this + leverages ``mimalloc`` and significantly reduces memory allocation overhead + in multi-threaded workloads. \ No newline at end of file diff --git a/doc/release/upcoming_changes/30857.new_feature.rst b/doc/release/upcoming_changes/30857.new_feature.rst new file mode 100644 index 000000000000..a6af45dcbbcc --- /dev/null +++ b/doc/release/upcoming_changes/30857.new_feature.rst @@ -0,0 +1,5 @@ +Added N-D evaluation functions to the polynomial package +-------------------------------------------------------- +New functions ``polyvalnd``, ``chebvalnd``, ``legvalnd``, ``hermvalnd``, +``hermevalnd``, and ``lagvalnd`` have been added to evaluate polynomials +in arbitrary dimensions, analogous to the existing 2D and 3D evaluators. \ No newline at end of file diff --git a/doc/release/upcoming_changes/30869.change.rst b/doc/release/upcoming_changes/30869.change.rst new file mode 100644 index 000000000000..b7ba697aaa7f --- /dev/null +++ b/doc/release/upcoming_changes/30869.change.rst @@ -0,0 +1,3 @@ +``numpy.triu_indices`` now accepts ``unsigned integers`` +-------------------------------------------------------- +``numpy.triu_indices`` previously used to error in some cases when ``unsigned integers`` were given as arguments. Now, it accepts them in all cases. diff --git a/doc/release/upcoming_changes/30869.deprecation.rst b/doc/release/upcoming_changes/30869.deprecation.rst new file mode 100644 index 000000000000..f193353cb737 --- /dev/null +++ b/doc/release/upcoming_changes/30869.deprecation.rst @@ -0,0 +1,7 @@ +Inputs other than ``integers`` are deprecated +--------------------------------------------- +Inputs other than integers are deprecated for ``numpy.triu_indices`` and ``numpy.tril_indices``. + +The ``M``, ``k`` and ``N`` parameters of ``numpy.tri`` also deprecate non-integer arguments. 
+ +The ``k`` parameter of both ``numpy.tril_indices_from`` and ``numpy.triu_indices_from`` deprecates non-integer arguments. diff --git a/doc/release/upcoming_changes/30937.compatibility.rst b/doc/release/upcoming_changes/30937.compatibility.rst new file mode 100644 index 000000000000..7d624132f0c2 --- /dev/null +++ b/doc/release/upcoming_changes/30937.compatibility.rst @@ -0,0 +1,8 @@ +``from_dlpack`` raises ``BufferError`` instead of ``RuntimeError`` +------------------------------------------------------------------ + +``np.from_dlpack`` now raises ``BufferError`` instead of ``RuntimeError`` +when the incoming DLPack tensor has an unsupported device, dtype, or +exceeds the maximum number of dimensions. This aligns with the DLPack +and Array API specifications, which recommend ``BufferError`` for data +that cannot be imported. diff --git a/doc/release/upcoming_changes/30965.improvement.rst b/doc/release/upcoming_changes/30965.improvement.rst new file mode 100644 index 000000000000..aae0f8beaa6c --- /dev/null +++ b/doc/release/upcoming_changes/30965.improvement.rst @@ -0,0 +1,4 @@ +``f2py`` modules now show allocatable arrays in ``dir()`` +--------------------------------------------------------- +Allocatable module variables wrapped by ``f2py`` now appear in ``dir()`` +output, matching their accessibility by name. diff --git a/doc/release/upcoming_changes/30984.capi.rst b/doc/release/upcoming_changes/30984.capi.rst new file mode 100644 index 000000000000..e2a41c10424f --- /dev/null +++ b/doc/release/upcoming_changes/30984.capi.rst @@ -0,0 +1,5 @@ +* It is now possible to register ``"real"`` and ``"imag"`` + ArrayMethods via ``PyUFunc_AddLoopsFromSpecs``. These will + be used for ``imag`` and ``real`` and should normally + set ``*view_offset`` in their ``resolve_descriptors`` function + to allow the array attributes to return views. 
diff --git a/doc/release/upcoming_changes/30984.change.rst b/doc/release/upcoming_changes/30984.change.rst new file mode 100644 index 000000000000..16bb631b30e9 --- /dev/null +++ b/doc/release/upcoming_changes/30984.change.rst @@ -0,0 +1,15 @@ +``object`` dtype in ``.real`` and ``.imag`` and related functions +----------------------------------------------------------------- +The array attributes ``.real`` and ``.imag`` now behave differently +for object arrays and return ``getattr(element, "real", element)`` +or ``getattr(element, "imag", 0)`` elementwise. +Additionally, the return for both is now read-only to avoid possible +in-place changes having no effect. + +This change also affects ``np.isreal()`` which uses ``arr.imag``. + +Previously, ``.imag`` always returned ``0`` while ``.real`` returned +the original array unmodified. +The new behavior now returns the correct values for complex Python +objects but may also lead to surprises, for example if ``element.real()`` +is a method and not a property. diff --git a/doc/release/upcoming_changes/30994.c_api.rst b/doc/release/upcoming_changes/30994.c_api.rst new file mode 100644 index 000000000000..5a704405ec98 --- /dev/null +++ b/doc/release/upcoming_changes/30994.c_api.rst @@ -0,0 +1,4 @@ +* Added ``PyDataType_TYPE``, ``PyDataType_KIND``, ``PyDataType_BYTEORDER`` and + ``PyDataType_TYPEOBJ`` accessor macros to the C API. Together with the other + accessor macros added for the NumPy 2.0 transition, these allow accessing the + fields of ``PyArray_Descr`` structs without any direct field accesses.
diff --git a/doc/release/upcoming_changes/31067.c_api.rst b/doc/release/upcoming_changes/31067.c_api.rst new file mode 100644 index 000000000000..d9ab80c1470a --- /dev/null +++ b/doc/release/upcoming_changes/31067.c_api.rst @@ -0,0 +1,8 @@ +``PyArray_DescrFromScalar`` now preserves parametric dtype information +---------------------------------------------------------------------- +``PyArray_DescrFromScalar`` now correctly returns the full dtype descriptor for +scalars of user-defined parametric data types, including any dtype parameters. +Previously, parameters were silently discarded, which could cause incorrect +results in operations like ``astype`` on scalar objects. Internally, the +function now delegates to ``discover_descr_from_pyobject``, which handles +parametric dtypes correctly. diff --git a/doc/release/upcoming_changes/31172.typing.rst b/doc/release/upcoming_changes/31172.typing.rst new file mode 100644 index 000000000000..739583f79eaf --- /dev/null +++ b/doc/release/upcoming_changes/31172.typing.rst @@ -0,0 +1,22 @@ +Shape-typing support for many functions and methods +--------------------------------------------------- +Many functions and methods now have shape-aware return type annotations. +Type-checkers can now infer the number of dimensions of the returned array +through common operations. For example, ``np.linspace(0, 1)`` is now typed +as a 1-d ``float64`` array, and ``np.sum(x, keepdims=True)`` has the same +number of dimensions as ``x``. 
+ +This covers ``numpy.linalg`` functions, array creation functions (like +``asarray``, ``from{buffer,string,file,iter,regex}``), range functions (``linspace``, +``logspace``, ``geomspace``), aggregation functions and methods (``sum``, +``mean``, ``std``, ``var``, ``min``, ``max``, ``all``, ``any``, etc.), sorting +(``sort``, ``argsort``, ``argpartition``), cumulative operations (``cumsum``, +``cumprod``, etc.), set operations (``unique_values``, ``intersect1d``, +``union1d``, etc.), and various other functions including ``nonzero``, +``transpose``, ``diagonal``, ``atleast_{1,2,3}d``, ``clip``, ``round``, +``inner``, ``bincount``, and ``fft.fftfreq``. Several of these also gained +more precise return dtype annotations as part of this work. + +Shape-typing is still a work-in-progress, so coverage is not yet complete. +Because of limitations in Python's type system and current type-checkers, +shape-typing is often only implemented for the most common lower-rank cases. diff --git a/doc/release/upcoming_changes/31226.typing.rst b/doc/release/upcoming_changes/31226.typing.rst new file mode 100644 index 000000000000..d4ea37779cf7 --- /dev/null +++ b/doc/release/upcoming_changes/31226.typing.rst @@ -0,0 +1,6 @@ +``numpy.fft`` typing improvements and preliminary shape-typing support +---------------------------------------------------------------------- +The ``numpy.fft`` functions now support non-``float64``/``complex128`` dtypes and gain +preliminary shape-typing support. For example, the return type of ``numpy.fft.fft`` now +depends on the shape-type of its inputs, falling back to the backward-compatible return +type when the shape-types are unknown at type-checking time. 
diff --git a/doc/release/upcoming_changes/31238.compatibility.rst b/doc/release/upcoming_changes/31238.compatibility.rst new file mode 100644 index 000000000000..46eccb403b17 --- /dev/null +++ b/doc/release/upcoming_changes/31238.compatibility.rst @@ -0,0 +1,21 @@ +Corrections to the BTPE binomial sampler +---------------------------------------- + +Two independent errors in the Stirling series of the acceptance/rejection +step of the BTPE algorithm used by `numpy.random.Generator.binomial` +have been corrected: + +* The third and fourth error terms were added rather than + subtracted. This sign error was inherited from section 5.3 of the + original 1988 paper by Kachitvichyanukul & Schmeiser, which incorrectly + adds all four terms. + +* The leading coefficient had a digit-swap typo (``13680`` instead of ``13860``) + that was introduced in the initial implementation. + +As a result, ``Generator.binomial`` and ``Generator.multinomial`` (which uses +binomial internally) may now return different samples for the same seed. + +The legacy `numpy.random.RandomState.binomial` and +`numpy.random.RandomState.multinomial` are not affected: they preserve the +original (incorrect) behavior, so existing streams remain reproducible. diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index 51ccd7690eff..c1b9a91dd3c1 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -24,6 +24,7 @@ Each file should be named like ``..rst``, where * ``improvement``: General improvements and edge-case changes which are not new features or compatibility related. * ``performance``: Performance changes that should not affect other behaviour. +* ``typing``: Improvements and changes related to static typing. * ``change``: Other changes * ``highlight``: Adds a highlight bullet point to use as a possibly highlight of the release. @@ -59,4 +60,3 @@ will look in the final release notes. 
This README was adapted from the pytest changelog readme under the terms of the MIT licence. - diff --git a/doc/source/_static/favicon/apple-touch-icon.png b/doc/source/_static/favicon/apple-touch-icon.png index e6cd574260aa..4af8e0e96e31 100644 Binary files a/doc/source/_static/favicon/apple-touch-icon.png and b/doc/source/_static/favicon/apple-touch-icon.png differ diff --git a/doc/source/_static/index-images/api.svg b/doc/source/_static/index-images/api.svg index e637525cc0b6..993cc4bc9640 100644 --- a/doc/source/_static/index-images/api.svg +++ b/doc/source/_static/index-images/api.svg @@ -1,31 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/doc/source/_static/index-images/contributor.svg b/doc/source/_static/index-images/contributor.svg index 3a689e0e4cb2..6fbc4c18ac4d 100644 --- a/doc/source/_static/index-images/contributor.svg +++ b/doc/source/_static/index-images/contributor.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/doc/source/_static/index-images/getting_started.svg b/doc/source/_static/index-images/getting_started.svg index 04db7e615671..c451a0ee8f56 100644 --- a/doc/source/_static/index-images/getting_started.svg +++ b/doc/source/_static/index-images/getting_started.svg @@ -1,31 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/doc/source/_static/index-images/user_guide.svg b/doc/source/_static/index-images/user_guide.svg index d61b0937da75..9f502effd85d 100644 --- a/doc/source/_static/index-images/user_guide.svg +++ b/doc/source/_static/index-images/user_guide.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 1555dafb5539..a08902c9a7d8 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -28,12 +28,15 @@ body { button.btn.version-switcher__button, 
button.btn.version-switcher__button:hover { - color: black; font-size: small; } /* Main index page overview cards */ +.sd-card { + background-color: var(--pst-color-on-background); +} + .sd-card .sd-card-img-top { height: 60px; width: 60px; @@ -45,7 +48,8 @@ button.btn.version-switcher__button:hover { /* Main index page overview images */ html[data-theme=dark] .sd-card img[src*='.svg'] { - filter: invert(0.82) brightness(0.8) contrast(1.2); + filter: invert(1.0) saturate(0.0); + background: none; } /* Legacy admonition */ diff --git a/doc/source/building/cpu_simd.rst b/doc/source/building/cpu_simd.rst new file mode 100644 index 000000000000..f99a68b0c28a --- /dev/null +++ b/doc/source/building/cpu_simd.rst @@ -0,0 +1,26 @@ +CPU support & SIMD +================== + +NumPy supports a wide range of platforms and CPUs, and includes a significant +amount of code optimized for specific CPUs. By default, NumPy targets a +baseline with the minimum required SIMD instruction sets that are needed +(e.g., SSE4.2 on x86-64 CPUs) and uses dynamic dispatch to use newer instruction +sets (e.g., AVX2 and AVX512 on x86-64) when those are detected at runtime. + +There are a number of build options that can be used to modify that behavior. +The default build settings are chosen for both portability and performance, and +should be reasonably close to optimal for creating redistributable binaries as +well as local installs. That said, there are reasons one may want to change the +default behavior, for example to obtain smaller binaries, to install on very old +hardware, to work around bugs, or for testing. + +To detect and use all CPU features available on your local machine:: + + $ python -m pip install . -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none" + +To use a lower baseline without any SIMD optimizations, useful for very old CPUs:: + + $ python -m pip install . 
-Csetup-args=-Dcpu-baseline="none" + +For more usage scenarios and more in-depth information about NumPy's SIMD support, +see :ref:`cpu-build-options`. diff --git a/doc/source/building/distutils_equivalents.rst b/doc/source/building/distutils_equivalents.rst index 156174d02358..65821bfec9d9 100644 --- a/doc/source/building/distutils_equivalents.rst +++ b/doc/source/building/distutils_equivalents.rst @@ -3,7 +3,7 @@ Meson and ``distutils`` ways of doing things -------------------------------------------- -*Old workflows (numpy.distutils based):* +*Old workflows (numpy.distutils based, no longer relevant):* 1. ``python runtests.py`` 2. ``python setup.py build_ext -i`` + ``export diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index d027ecb0ee8f..5493c0fd2dd6 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -140,66 +140,26 @@ your system. .. tab-item:: Windows :sync: windows - On Windows, the use of a Fortran compiler is more tricky than on other - platforms, because MSVC does not support Fortran, and gfortran and MSVC - can't be used together. If you don't need to run the ``f2py`` tests, simply - using MSVC is easiest. Otherwise, you will need one of these sets of - compilers: - - 1. MSVC + Intel Fortran (``ifort``) - 2. Intel compilers (``icc``, ``ifort``) - 3. Mingw-w64 compilers (``gcc``, ``g++``, ``gfortran``) - Compared to macOS and Linux, building NumPy on Windows is a little more - difficult, due to the need to set up these compilers. It is not possible to - just call a one-liner on the command prompt as you would on other - platforms. - - First, install Microsoft Visual Studio - the 2019 Community Edition or any - newer version will work (see the + difficult, due to the need to set up compilers. First, install Microsoft + Visual Studio - the 2019 Community Edition or any newer version will work + (see the `Visual Studio download site `__). 
- This is needed even if you use the MinGW-w64 or Intel compilers, in order - to ensure you have the Windows Universal C Runtime (the other components of - Visual Studio are not needed when using Mingw-w64, and can be deselected if - desired, to save disk space). The recommended version of the UCRT is - >= 10.0.22621.0. - - .. tab-set:: - - .. tab-item:: MSVC - - The MSVC installer does not put the compilers on the system path, and - the install location may change. To query the install location, MSVC - comes with a ``vswhere.exe`` command-line utility. And to make the - C/C++ compilers available inside the shell you are using, you need to - run a ``.bat`` file for the correct bitness and architecture (e.g., for - 64-bit Intel CPUs, use ``vcvars64.bat``). - - If using a Conda environment while a version of Visual Studio 2019+ is - installed that includes the MSVC v142 package (VS 2019 C++ x86/x64 - build tools), activating the conda environment should cause Visual - Studio to be found and the appropriate .bat file executed to set - these variables. - - For detailed guidance, see `Use the Microsoft C++ toolset from the command line - `__. + This is needed to ensure you have the Windows Universal C Runtime. The + recommended version of the UCRT is >= 10.0.22621.0. - .. tab-item:: Intel + The MSVC installer does not put the compilers on the system path, and + the install location may change. To query the install location, MSVC + comes with a ``vswhere.exe`` command-line utility. And to make the + C/C++ compilers available inside the shell you are using, you need to + run a ``.bat`` file for the correct bitness and architecture (e.g., for + 64-bit Intel CPUs, use ``vcvars64.bat``). - Similar to MSVC, the Intel compilers are designed to be used with an - activation script (``Intel\oneAPI\setvars.bat``) that you run in the - shell you are using. This makes the compilers available on the path. 
- For detailed guidance, see - `Get Started with the IntelÂŽ oneAPI HPC Toolkit for Windows - `__. + For detailed guidance, see `Use the Microsoft C++ toolset from the command line + `__. - .. tab-item:: MinGW-w64 - - There are several sources of binaries for MinGW-w64. We recommend the - RTools versions, which can be installed with Chocolatey (see - Chocolatey install instructions `here `_):: - - choco install rtools -y --no-progress --force --version=4.0.0.20220206 + If you don't need ``f2py``, MSVC alone is sufficient. For Fortran compiler + setup (needed for ``f2py``), see :ref:`F2PY and Windows `. .. note:: @@ -208,9 +168,7 @@ your system. can be found) in order to be found, with the exception of MSVC which will be found automatically if and only if there are no other compilers on the ``PATH``. You can use any shell (e.g., Powershell, ``cmd`` or - Git Bash) to invoke a build. To check that this is the case, try - invoking a Fortran compiler in the shell you use (e.g., ``gfortran - --version`` or ``ifort --version``). + Git Bash) to invoke a build. .. warning:: @@ -218,52 +176,32 @@ your system. creation will not work due to an outdated Fortran compiler. If that happens, remove the ``compilers`` entry from ``environment.yml`` and try again. The Fortran compiler should be installed as described in - this section. + the :ref:`F2PY Windows documentation `. .. tab-item:: Windows on ARM64 :sync: Windows on ARM64 - In Windows on ARM64, the set of a compiler options that are available for - building NumPy are limited. Compilers such as GCC and GFortran are not yet - supported for Windows on ARM64. Currently, the NumPy build for Windows on ARM64 - is supported with MSVC and LLVM toolchains. The use of a Fortran compiler is - more tricky than on other platforms, because MSVC does not support Fortran, and - gfortran and MSVC can't be used together. If you don't need to run the ``f2py`` - tests, simply using MSVC is easiest. 
Otherwise, you will need the following - set of compilers: - - 1. MSVC + flang (``cl``, ``flang``) - 2. LLVM + flang (``clang-cl``, ``flang``) + In Windows on ARM64, the compiler options available for building NumPy are + limited. GCC and gfortran are not yet supported. Currently, the NumPy build + for Windows on ARM64 is supported with MSVC and LLVM toolchains. First, install Microsoft Visual Studio - the 2022 Community Edition will work (see the `Visual Studio download site `__). - Ensure that you have installed necessary Visual Studio components for building NumPy + Ensure that you have installed necessary Visual Studio components for building NumPy on WoA from `here `__. - To use the flang compiler for Windows on ARM64, install Latest LLVM - toolchain for WoA from `here `__. - - .. tab-set:: - - .. tab-item:: MSVC - - The MSVC installer does not put the compilers on the system path, and - the install location may change. To query the install location, MSVC - comes with a ``vswhere.exe`` command-line utility. And to make the - C/C++ compilers available inside the shell you are using, you need to - run a ``.bat`` file for the correct bitness and architecture (e.g., for - ARM64-based CPUs, use ``vcvarsarm64.bat``). - - For detailed guidance, see `Use the Microsoft C++ toolset from the command line - `__. + The MSVC installer does not put the compilers on the system path, and + the install location may change. To query the install location, MSVC + comes with a ``vswhere.exe`` command-line utility. And to make the + C/C++ compilers available inside the shell you are using, you need to + run a ``.bat`` file for the correct bitness and architecture (e.g., for + ARM64-based CPUs, use ``vcvarsarm64.bat``). - .. tab-item:: LLVM + For detailed guidance, see `Use the Microsoft C++ toolset from the command line + `__. - Similar to MSVC, LLVM does not put the compilers on the system path. 
- To set system path for LLVM compilers, users may need to use ``set`` - command to put compilers on the system path. To check compiler's path - for LLVM's clang-cl, try invoking LLVM's clang-cl compiler in the shell you use - (``clang-cl --version``). + If you don't need ``f2py``, MSVC alone is sufficient. For Fortran compiler + setup (needed for ``f2py``), see :ref:`F2PY and Windows `. .. note:: @@ -272,9 +210,7 @@ your system. can be found) in order to be found, with the exception of MSVC which will be found automatically if and only if there are no other compilers on the ``PATH``. You can use any shell (e.g., Powershell, ``cmd`` or - Git Bash) to invoke a build. To check that this is the case, try - invoking a Fortran compiler in the shell you use (e.g., ``flang - --version``). + Git Bash) to invoke a build. .. warning:: @@ -515,6 +451,7 @@ Customizing builds compilers_and_options blas_lapack + cpu_simd cross_compilation redistributable_binaries diff --git a/doc/source/building/understanding_meson.rst b/doc/source/building/understanding_meson.rst index b990ff283271..0c29302c9abb 100644 --- a/doc/source/building/understanding_meson.rst +++ b/doc/source/building/understanding_meson.rst @@ -87,11 +87,11 @@ that's just an arbitrary name we picked here):: meson install -C build -It will then install to ``build-install/lib/python3.11/site-packages/numpy``, +It will then install to ``build-install/lib/python3.12/site-packages/numpy``, which is not on your Python path, so to add it do (*again, this is for learning purposes, using ``PYTHONPATH`` explicitly is typically not the best idea*):: - export PYTHONPATH=$PWD/build-install/lib/python3.11/site-packages/ + export PYTHONPATH=$PWD/build-install/lib/python3.12/site-packages/ Now we should be able to import ``numpy`` and run the tests. 
Remembering that we need to move out of the root of the repo to ensure we pick up the package diff --git a/doc/source/conf.py b/doc/source/conf.py index af431db44351..e4909c11bd63 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -2,6 +2,7 @@ import os import re import sys +import sysconfig from datetime import datetime from docutils import nodes @@ -17,6 +18,9 @@ # must be kept alive to hold the patched names _name_cache = {} +FREE_THREADED_BUILD = sysconfig.get_config_var('Py_GIL_DISABLED') + + def replace_scalar_type_names(): """ Rename numpy types to use the canonical names to make sphinx behave """ import ctypes @@ -30,10 +34,19 @@ class PyObject(ctypes.Structure): class PyTypeObject(ctypes.Structure): pass - PyObject._fields_ = [ - ('ob_refcnt', Py_ssize_t), - ('ob_type', ctypes.POINTER(PyTypeObject)), - ] + if not FREE_THREADED_BUILD: + PyObject._fields_ = [ + ('ob_refcnt', Py_ssize_t), + ('ob_type', ctypes.POINTER(PyTypeObject)), + ] + else: + # As of Python 3.14 + PyObject._fields_ = [ + ('ob_refcnt_full', ctypes.c_int64), + # an anonymous struct that we don't try to model + ('__private', ctypes.c_int64), + ('ob_type', ctypes.POINTER(PyTypeObject)), + ] PyTypeObject._fields_ = [ # varhead @@ -57,7 +70,7 @@ class PyTypeObject(ctypes.Structure): if sys.implementation.name == 'cpython': c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') else: - # It is not guarenteed that the c_typ has this model on other + # It is not guaranteed that the c_typ has this model on other # implementations _name_cache[typ] = b"numpy." + name.encode('utf8') @@ -114,7 +127,7 @@ class PyTypeObject(ctypes.Structure): templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = {'.rst': 'restructuredtext'} # General substitutions. project = 'NumPy' @@ -145,32 +158,6 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. 
default_role = "autolink" -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -exclude_patterns = [] -suppress_warnings = [] -nitpick_ignore = [] - -if sys.version_info[:2] >= (3, 12): - exclude_patterns += [ - "reference/distutils.rst", - "reference/distutils/misc_util.rst", - ] - suppress_warnings += [ - 'toc.excluded', # Suppress warnings about excluded toctree entries - ] - nitpicky = True - nitpick_ignore += [ - ('ref', 'numpy-distutils-refguide'), - # The first ignore is not catpured without nitpicky = True. - # These three ignores are required once nitpicky = True is set. - ('py:mod', 'numpy.distutils'), - ('py:class', 'Extension'), - ('py:class', 'numpy.distutils.misc_util.Configuration'), - ] - # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False @@ -436,7 +423,7 @@ def setup(app): 'matplotlib': ('https://matplotlib.org/stable', None), 'imageio': ('https://imageio.readthedocs.io/en/stable', None), 'skimage': ('https://scikit-image.org/docs/stable', None), - 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None), + 'pandas': ('https://pandas.pydata.org/docs', None), 'scipy-lecture-notes': ('https://scipy-lectures.org', None), 'pytest': ('https://docs.pytest.org/en/stable', None), 'numpy-tutorials': ('https://numpy.org/numpy-tutorials', None), @@ -596,7 +583,7 @@ def linkcode_resolve(domain, info): fn = relpath(fn, start=dirname(numpy.__file__)) if lineno: - linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) + linespec = f"#L{lineno}-L{lineno + len(source) - 1}" else: linespec = "" @@ -606,8 +593,8 @@ def linkcode_resolve(domain, info): if 'dev' in numpy.__version__: return f"https://github.com/numpy/numpy/blob/main/numpy/{fn}{linespec}" else: - return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % ( - numpy.__version__, fn, linespec) + return (f"https://github.com/numpy/numpy/blob/v{numpy.__version__}/" + 
f"numpy/{fn}{linespec}") from pygments.lexer import inherit @@ -618,7 +605,7 @@ def linkcode_resolve(domain, info): class NumPyLexer(CLexer): name = 'NUMPYLEXER' - tokens = { + tokens = { # noqa: RUF012 'statements': [ (r'@[a-zA-Z_]*@', Comment.Preproc, 'macro'), inherit, @@ -634,7 +621,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore += [ +nitpick_ignore = [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), diff --git a/doc/source/dev/ai_policy.rst b/doc/source/dev/ai_policy.rst new file mode 100644 index 000000000000..3a793d45b849 --- /dev/null +++ b/doc/source/dev/ai_policy.rst @@ -0,0 +1,80 @@ +.. _ai_policy: + +AI Policy +========= + +"AI" herein refers to generative AI tools like large language models (LLMs) +that can generate, edit, and review software code, create and manipulate +images, or generate human-like communication. + +Responsibility +-------------- + +You are responsible for any code you submit to NumPy's repositories, regardless +of whether it was manually written or generated by AI. You must understand and be able +to explain the code you submit as well as the existing related code. It is not +acceptable to submit a patch that you cannot understand and explain yourself. +In explaining your contribution, do not use AI to automatically generate +comments, pull request descriptions, or issue descriptions. See below for our +policy on AI translation systems. + +Disclosure +---------- + +You must disclose whether AI has been used to assist in the development of +your pull request. +If so, you must document which tool(s) have been used, how they were used, +and specify what code or text is AI generated. We will reject any pull request +that does not include the disclosure. + +Code Quality +------------ + +Contributors are expected to submit code that meets NumPy's standards. 
We will +reject pull requests that we deem to be "`AI slop`_". Do not waste developers' +time by submitting code that is fully or mostly generated by AI, and doesn't +meet our standards. + +.. _AI slop: https://en.wikipedia.org/wiki/AI_slop + +Copyright +--------- + +All code in NumPy is released under the BSD 3-clause copyright license. +Contributors to NumPy license their code under the same license when it is +included in NumPy's version control repository. That means contributors must +own the copyright of any code submitted to NumPy or must include the BSD +3-clause compatible open source license(s) associated with the submitted code +in the patch. Code generated by AI may infringe on copyright and it is the +submitter's responsibility to not infringe. We reserve the right to reject any pull +requests, AI-generated or not, where the copyright is in question. + +Communication +------------- + +When interacting with developers (forums, discussions, +issues, pull requests, etc.) do not use AI to speak for you, except for +translation or grammar editing. If the developers want to chat with a chatbot, +they can do so themselves. Human-to-human communication is essential for an +open source community to thrive. + +AI Agents +--------- +The use of an AI agent that writes code and then submits a pull request autonomously is +not permitted. A human must check any generated code and submit a pull request according +to the 'Responsibility' section above. 
+ +Other Resources +--------------- +While these do not formally form part of NumPy's AI policy, the following resources +may be helpful in understanding some pitfalls associated with using AI to contribute to +NumPy: + +- https://llvm.org/docs/AIToolPolicy.html +- https://github.com/melissawm/open-source-ai-contribution-policies +- https://blog.scientific-python.org/scientific-python/community-considerations-around-ai/ + +Acknowledgements +---------------- +We thank the SciPy developers for their AI policy, upon which this document is largely +based. diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 7583dc9af84a..98dc552a779e 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -141,9 +141,7 @@ for dropping support for old Python and NumPy versions: :ref:`NEP29`. We recommend all packages depending on NumPy to follow the recommendations in NEP 29. -For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``numpy.distutils`` or -``setuptools`` to build). +For *run-time dependencies*, specify version bounds in `pyproject.toml`. Most libraries that rely on NumPy will not need to set an upper version bound: NumPy is careful to preserve backward-compatibility. diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index c2085a0013ef..5d77509b43dc 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -206,18 +206,82 @@ Install all dependent packages using pip:: To run lint checks before committing new code, run:: - $ python tools/linter.py - -To check all changes in newly added Python code of current branch with target branch, run:: - - $ python tools/linter.py - -If there are no errors, the script exits with no message. 
In case of errors, -check the error message for details:: - - $ python tools/linter.py - ./numpy/_core/tests/test_scalarmath.py:34:5: E303 too many blank lines (3) - 1 E303 too many blank lines (3) + $ spin lint + +If there are no errors, the output will look like:: + + $ spin lint + Running Ruff Check... + All checks passed! + + Running C API borrow-reference linter... + Scanning 548 C/C++ source files... + + All checks passed! C API borrow-ref linter found no issues. + + + Running cython-lint... + +In case of errors, check the error message for details:: + + $ spin lint + Running Ruff Check... + I001 [*] Import block is un-sorted or un-formatted + --> numpy/matlib.py:12:1 + | + 10 | PendingDeprecationWarning, stacklevel=2) + 11 | + 12 | / import numpy as np + 13 | | + 14 | | # Matlib.py contains all functions in the numpy namespace with a few + 15 | | # replacements. See doc/source/reference/routines.matlib.rst for details. + 16 | | # Need * as we're copying the numpy namespace. + 17 | | from numpy import * # noqa: F403 + 18 | | from numpy.matrixlib.defmatrix import matrix, asmatrix + | |______________________________________________________^ + 19 | + 20 | __version__ = np.__version__ + | + help: Organize imports + 15 | # replacements. See doc/source/reference/routines.matlib.rst for details. + 16 | # Need * as we're copying the numpy namespace. + 17 | from numpy import * # noqa: F403 + - from numpy.matrixlib.defmatrix import matrix, asmatrix + 18 + from numpy.matrixlib.defmatrix import asmatrix, matrix + 19 | + 20 | __version__ = np.__version__ + 21 | + + E501 Line too long (127 > 88) + --> numpy/matlib.py:214:89 + | + 212 | ------- + 213 | I : matrix + 214 | A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 215 | + 216 | See Also + | + + Found 2 errors. + [*] 1 fixable with the `--fix` option. 
+ +To automatically fix issues that can be fixed, run:: + + $ spin lint --fix + Running Ruff Check... + E501 Line too long (127 > 88) + --> numpy/matlib.py:214:89 + | + 212 | ------- + 213 | I : matrix + 214 | A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 215 | + 216 | See Also + | + + Found 2 errors (1 fixed, 1 remaining). It is advisable to run lint checks before pushing commits to a remote branch since the linter runs as part of the CI pipeline. diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index 10b07cc1f437..16e383e125dc 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -269,6 +269,10 @@ function, add a release note to the ``doc/release/upcoming_changes/`` directory, following the instructions and format in the ``doc/release/upcoming_changes/README.rst`` file. +Use the same prefix convention for your pull request title as for commit +messages (e.g., ``BUG:``, ``ENH:``, ``DOC:``). This enables automated labeling +of your PR. + .. _workflow_PR_timeline: diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 50dac45e475a..bee0bf746ab4 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -183,6 +183,8 @@ Guidelines * No changes are ever committed without review and approval by a core team member. Please ask politely on the PR or on the `mailing list`_ if you get no response to your pull request within a week. +* Do not include copyright notices in source code without explicitly discussing the need first. + In general, any code you contribute to the project is under the project `license `_. .. _stylistic-guidelines: @@ -252,7 +254,9 @@ The rest of the story .. 
toctree:: :maxdepth: 2 + ai_policy development_environment + spin howto_build_docs development_workflow development_advanced_debugging diff --git a/doc/source/dev/spin.rst b/doc/source/dev/spin.rst new file mode 100644 index 000000000000..c1a00c337c30 --- /dev/null +++ b/doc/source/dev/spin.rst @@ -0,0 +1,28 @@ +.. _spin_tool: + +Spin: NumPy’s developer tool +---------------------------- + +NumPy uses a command-line tool called ``spin`` to support common development +tasks such as building from source, running tests, building documentation, +and managing other +developer workflows. + +The ``spin`` tool provides a consistent interface for contributors working on +NumPy itself, wrapping multiple underlying tools and configurations into a +single command that follows NumPy’s development conventions. +Running the full test suite:: + + $ spin test -m full + +Running a subset of tests:: + + $ spin test -t numpy/_core/tests + +Running tests with coverage:: + + $ spin test --coverage + +Building the documentation:: + + $ spin docs \ No newline at end of file diff --git a/doc/source/doxyfile b/doc/source/doxyfile index ea45b9578309..551f66dda721 100644 --- a/doc/source/doxyfile +++ b/doc/source/doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.18 +# Doxyfile 1.13.2 #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- @@ -11,7 +11,6 @@ OUTPUT_DIRECTORY = @ROOT_DIR/doc/build/doxygen CREATE_SUBDIRS = NO ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English -OUTPUT_TEXT_DIRECTION = None BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = "The $name class" \ @@ -145,15 +144,10 @@ REFERENCES_LINK_SOURCE = YES SOURCE_TOOLTIPS = YES USE_HTAGS = NO VERBATIM_HEADERS = YES -CLANG_ASSISTED_PARSING = NO -CLANG_OPTIONS = -CLANG_DATABASE_PATH = #--------------------------------------------------------------------------- # Configuration options related 
to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = YES -COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- @@ -168,7 +162,6 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 HTML_COLORSTYLE_SAT = 100 HTML_COLORSTYLE_GAMMA = 80 -HTML_TIMESTAMP = NO HTML_DYNAMIC_MENUS = YES HTML_DYNAMIC_SECTIONS = NO HTML_INDEX_NUM_ENTRIES = 100 @@ -201,7 +194,6 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO HTML_FORMULA_FORMAT = png FORMULA_FONTSIZE = 10 -FORMULA_TRANSPARENT = YES FORMULA_MACROFILE = USE_MATHJAX = NO MATHJAX_FORMAT = HTML-CSS @@ -234,9 +226,7 @@ PDF_HYPERLINKS = YES USE_PDFLATEX = YES LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO -LATEX_SOURCE_CODE = NO LATEX_BIB_STYLE = plain -LATEX_TIMESTAMP = NO LATEX_EMOJI_DIRECTORY = #--------------------------------------------------------------------------- # Configuration options related to the RTF output @@ -247,7 +237,6 @@ COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = -RTF_SOURCE_CODE = NO #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -268,7 +257,6 @@ XML_NS_MEMB_FILE_SCOPE = NO #--------------------------------------------------------------------------- GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- @@ -303,13 +291,10 @@ EXTERNAL_PAGES = YES #--------------------------------------------------------------------------- # 
Configuration options related to the dot tool #--------------------------------------------------------------------------- -CLASS_DIAGRAMS = YES DIA_PATH = HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO DOT_NUM_THREADS = 0 -DOT_FONTNAME = Helvetica -DOT_FONTSIZE = 10 DOT_FONTPATH = CLASS_GRAPH = YES COLLABORATION_GRAPH = YES @@ -334,7 +319,6 @@ PLANTUML_CFG_FILE = PLANTUML_INCLUDE_PATH = DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 -DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 585bfba57246..920848b9d0d3 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -5,8 +5,7 @@ ------------------------ As per the timeline laid out in :ref:`distutils-status-migration`, -``distutils`` has ceased to be the default build backend for ``f2py``. This page -collects common workflows in both formats. +``distutils`` has been removed. This page collects common workflows. .. note:: @@ -44,8 +43,6 @@ This will not win any awards, but can be a reasonable starting point. 1.2.1 Basic Usage ^^^^^^^^^^^^^^^^^ -This is unchanged: - .. code:: bash python -m numpy.f2py -c fib.f90 -m fib @@ -57,46 +54,21 @@ This is unchanged: 1.2.2 Specify the backend ^^^^^^^^^^^^^^^^^^^^^^^^^ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils +.. code-block:: bash - This is the default for Python versions before 3.12. + python -m numpy.f2py -c fib.f90 -m fib - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson - - This is the only option for Python versions after 3.12. +This is the only option. There used to be a ``distutils`` backend but it was +removed in NumPy2.5.0. 1.2.3 Pass a compiler name ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. 
tab-set:: - - .. tab-item:: Distutils - :sync: distutils +.. code-block:: bash - .. code-block:: bash + FC=gfortran python -m numpy.f2py -c fib.f90 -m fib - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --fcompiler=gfortran - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib --backend meson - - Native files can also be used. +Native files can also be used. Similarly, ``CC`` can be used in both cases to set the ``C`` compiler. Since the environment variables are generally pretty common across both, so a small @@ -137,73 +109,31 @@ sample is included below. 1.2.4 Dependencies ^^^^^^^^^^^^^^^^^^ -Here, ``meson`` can actually be used to set dependencies more robustly. - -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -llapack + python -m numpy.f2py -c fib.f90 -m fib --dep lapack - Note that this approach in practice is error prone. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack - - This maps to ``dependency("lapack")`` and so can be used for a wide variety - of dependencies. They can be `customized further `_ - to use CMake or other systems to resolve dependencies. +This maps to ``dependency("lapack")`` and so can be used for a wide variety +of dependencies. They can be `customized further `_ +to use CMake or other systems to resolve dependencies. 1.2.5 Libraries ^^^^^^^^^^^^^^^ -Both ``meson`` and ``distutils`` are capable of linking against libraries. - -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -lmylib -L/path/to/mylib +``meson`` is capable of linking against libraries. - .. tab-item:: Meson - :sync: meson +.. code-block:: bash - .. 
code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson -lmylib -L/path/to/mylib + python -m numpy.f2py -c fib.f90 -m fib -lmylib -L/path/to/mylib 1.3 Customizing builds ~~~~~~~~~~~~~~~~~~~~~~ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --build-dir blah - - This can be technically integrated with other codes, see :ref:`f2py-distutils`. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend meson --build-dir blah + python -m numpy.f2py -c fib.f90 -m fib --build-dir blah - The resulting build can be customized via the - `Meson Build How-To Guide `_. - In fact, the resulting set of files can even be committed directly and used - as a meson subproject in a separate codebase. +The resulting build can be customized via the +`Meson Build How-To Guide `_. +In fact, the resulting set of files can even be committed directly and used +as a meson subproject in a separate codebase. diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst deleted file mode 100644 index 87e17a811cd0..000000000000 --- a/doc/source/f2py/buildtools/distutils.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. _f2py-distutils: - -============================= -Using via `numpy.distutils` -============================= - -.. legacy:: - - ``distutils`` has been removed in favor of ``meson`` see - :ref:`distutils-status-migration`. - - -.. currentmodule:: numpy.distutils.core - -:mod:`numpy.distutils` is part of NumPy, and extends the standard Python -``distutils`` module to deal with Fortran sources and F2PY signature files, e.g. -compile Fortran sources, call F2PY to construct extension modules, etc. - -.. topic:: Example - - Consider the following ``setup_file.py`` for the ``fib`` and ``scalar`` - examples from :ref:`f2py-getting-started` section: - - .. 
literalinclude:: ./../code/setup_example.py - :language: python - - Running - - .. code-block:: bash - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. - -Extensions to ``distutils`` -=========================== - -:mod:`numpy.distutils` extends ``distutils`` with the following features: - -* :class:`Extension` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and in this case, the name of an Extension module must - match with the ```` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. - - If ``sources`` do not contain a signature file, then F2PY is used to scan - Fortran source files to construct wrappers to the Fortran codes. - - Additional options to the F2PY executable can be given using the - :class:`Extension` class argument ``f2py_options``. - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options. - - Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced - to support Fortran sources. - - Run - - .. code-block:: bash - - python config_fc build_src build_ext --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, one - can choose different Fortran compilers by using the ``build_ext`` - command option ``--fcompiler=``. Here ```` can be one of the - following names (on ``linux`` systems):: - - absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast - - See ``numpy_distutils/fcompiler.py`` for an up-to-date list of - supported compilers for different platforms, or run - - .. 
code-block:: bash - - python -m numpy.f2py -c --backend distutils --help-fcompiler diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index 37782e5ca74b..b12ca01559c2 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which was removed in ``Python 3.12.0`` in **October 2023**. Like the rest of + which was removed in ``NumPy2.5.0`` in **June 2026**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details. @@ -107,8 +107,8 @@ Build systems .. toctree:: :maxdepth: 2 - distutils meson + meson-python cmake skbuild distutils-to-meson diff --git a/doc/source/f2py/buildtools/meson-python.rst b/doc/source/f2py/buildtools/meson-python.rst new file mode 100644 index 000000000000..3f5dcc350827 --- /dev/null +++ b/doc/source/f2py/buildtools/meson-python.rst @@ -0,0 +1,208 @@ +.. _f2py-meson-python: + +===================================================== +Distributing F2PY extensions with ``meson-python`` +===================================================== + +The :ref:`f2py-meson` page covers building F2PY extensions using raw ``meson`` +commands. This page shows how to package those extensions into installable +Python distributions (sdists and wheels) using `meson-python +`_ as the PEP 517 build backend. + +This is the recommended approach for distributing F2PY-wrapped Fortran code as a +Python package on PyPI or for local ``pip install`` workflows. + +.. note:: + + ``meson-python`` replaced ``setuptools`` / ``numpy.distutils`` as the + standard way to build and distribute compiled extensions in the NumPy and + SciPy ecosystem. See :ref:`distutils-status-migration` for background. 
+ +Prerequisites +============= + +You need: + +* A C compiler +* A Fortran compiler (``gfortran``, ``ifort``, ``ifx``, ``flang-new``, etc.), + if you use any Fortran code in your package +* Python >= 3.10 +* ``meson``, ``meson-python``, and ``numpy`` (installed automatically during the + build when listed in ``build-system.requires``) + +Minimal example +=============== + +The project below wraps a Fortran ``fib`` subroutine into an importable Python +package called ``fib_wrapper``. + +Project layout:: + + fib_wrapper/ # project root + ├── fib.f90 # Fortran source + ├── fib_wrapper/ # Python package directory + │ └── __init__.py + ├── meson.build + └── pyproject.toml + +Fortran source +-------------- + +Save the following as ``fib.f90``: + +.. literalinclude:: ../code/fib_mesonpy.f90 + :language: fortran + +``pyproject.toml`` +------------------ + +.. literalinclude:: ../code/pyproj_mesonpy.toml + :language: toml + +Two entries matter here: + +* ``build-backend = "mesonpy"`` tells build frontends to use ``meson-python``. +* ``requires`` lists build-time dependencies. ``numpy >= 2.0`` is required so + that ``f2py``, the NumPy headers, and ``dependency('numpy')`` support in Meson + are available during compilation. + +``meson.build`` +--------------- + +.. literalinclude:: ../code/meson_mesonpy.build + +.. note:: + + The file is stored as ``meson_mesonpy.build`` in the documentation source + tree to avoid collisions with other examples. In your project, name it + ``meson.build``. + +The ``meson.build`` file does four things: + +1. Uses ``dependency('numpy')`` to locate NumPy headers, and a + ``declare_dependency`` to add the F2PY include directory (for + ``fortranobject.h``). +2. Runs ``f2py`` via ``custom_target`` to generate the C wrapper sources. +3. Compiles the generated C code together with the Fortran source into a Python + extension module using ``py.extension_module``. +4. 
Installs ``__init__.py`` into the package directory so the result is a proper + Python package. + +The ``subdir: 'fib_wrapper'`` argument on the extension module is required so +that the compiled ``fib`` shared library is installed inside the ``fib_wrapper/`` +package directory, next to ``__init__.py``. Without it the extension would +be installed at the top level and ``import fib_wrapper`` would not find the +``fib`` extension. The resulting installed layout is:: + + site-packages/ + └── fib_wrapper/ + ├── __init__.py # from .fib import fib + └── fib.cpython-*.so # compiled extension module + +``__init__.py`` +--------------- + +A minimal ``__init__.py`` re-exports the wrapped function: + +.. code-block:: python + + from .fib import fib + +Building and installing +======================= + +Editable install (development) +------------------------------ + +.. code-block:: bash + + pip install --no-build-isolation --editable . + +``--no-build-isolation`` reuses the current environment, which is useful when +iterating. This requires ``meson-python``, ``meson``, ``ninja``, and ``numpy`` +to already be installed. + +Building a wheel +---------------- + +.. code-block:: bash + + # If you don't yet have `pypa/build` installed: `pip install build` + python -m build --wheel + +The resulting ``.whl`` file in ``dist/`` can be uploaded to PyPI, or installed +elsewhere with ``pip install dist/fib_wrapper-0.1.0-*.whl``. + +Verifying the install +--------------------- + +.. code-block:: python + + >>> from fib_wrapper import fib + >>> fib(10) + array([ 0, 1, 1, 2, 3, 5, 8, 13, 21, 34], dtype=int32) + +Customizing the Fortran compiler +================================ + +``meson-python`` delegates compiler selection to ``meson``. By default, +``meson`` will choose the first Fortran compiler it finds on the PATH. +If you want more control over Fortran compiler selection, set the ``FC`` +environment variable before building: + +.. 
code-block:: bash + + FC=ifx python -m build --wheel + +For more control, use a `Meson native file +`_: + +.. code-block:: ini + + ; native.ini + [binaries] + fortran = 'ifx' + c = 'icx' + +.. code-block:: bash + + python -m build --wheel -Csetup-args="--native-file=native.ini" + +Adding dependencies (BLAS, LAPACK, etc.) +======================================== + +Use ``dependency()`` in ``meson.build`` to link against system libraries: + +.. code-block:: none + + lapack_dep = dependency('lapack') + + py.extension_module('mymod', + [sources, generated, incdir_f2py / 'fortranobject.c'], + dependencies : [np_dep, f2py_dep, lapack_dep], + install : true, + ) + +``meson`` resolves dependencies through ``pkg-config``, CMake, or its own +detection logic. See the `Meson dependency documentation +`_ for details. + +Differences from the ``scikit-build-core`` workflow +==================================================== + +The ``scikit-build-core`` approach documented in :ref:`f2py-skbuild` uses CMake +under the hood. ``meson-python`` provides: + +* Native Fortran compiler support in ``meson`` (no CMake layer). +* Direct integration with ``pip`` / ``build`` via PEP 517. +* The same build system used by NumPy and SciPy themselves. + +Further reading +=============== + +* `meson-python documentation `_ +* `Meson build system `_ +* `SciPy's meson build configuration `_ (real-world F2PY usage) +* :ref:`f2py-meson` (raw meson build without ``meson-python``) +* :ref:`f2py-skbuild` (alternative using ``scikit-build-core`` / CMake) +* :ref:`f2py-meson-distutils` (migration from ``distutils``) diff --git a/doc/source/f2py/buildtools/meson.rst b/doc/source/f2py/buildtools/meson.rst index c17c5d2ddc87..44560bef8c5f 100644 --- a/doc/source/f2py/buildtools/meson.rst +++ b/doc/source/f2py/buildtools/meson.rst @@ -15,11 +15,6 @@ Using via ``meson`` The default build system for ``f2py`` is now ``meson``, see :ref:`distutils-status-migration` for some more details.. 
-The key advantage gained by leveraging ``meson`` over the techniques described -in :ref:`f2py-distutils` is that this feeds into existing systems and larger -projects with ease. ``meson`` has a rather pythonic syntax which makes it more -comfortable and amenable to extension for ``python`` users. - Fibonacci walkthrough (F77) =========================== diff --git a/doc/source/f2py/code/fib_mesonpy.f90 b/doc/source/f2py/code/fib_mesonpy.f90 new file mode 100644 index 000000000000..f9f1f88e237f --- /dev/null +++ b/doc/source/f2py/code/fib_mesonpy.f90 @@ -0,0 +1,15 @@ +subroutine fib(a, n) + use iso_c_binding + integer(c_int), intent(in) :: n + integer(c_int), intent(out) :: a(n) + integer :: i + do i = 1, n + if (i == 1) then + a(i) = 0 + else if (i == 2) then + a(i) = 1 + else + a(i) = a(i - 1) + a(i - 2) + end if + end do +end subroutine fib diff --git a/doc/source/f2py/code/meson_mesonpy.build b/doc/source/f2py/code/meson_mesonpy.build new file mode 100644 index 000000000000..5537f3c0b6e9 --- /dev/null +++ b/doc/source/f2py/code/meson_mesonpy.build @@ -0,0 +1,43 @@ +project('fib_wrapper', 'c', + version : '0.1.0', + meson_version: '>=1.1.0', + default_options : ['warning_level=2'], +) + +add_languages('fortran', native: false) + +py = import('python').find_installation(pure: false) + +# NumPy >=2.0 provides include dirs via dependency() +np_dep = dependency('numpy') + +incdir_f2py = run_command(py, + ['-c', 'import numpy.f2py; print(numpy.f2py.get_include())'], + check : true +).stdout().strip() + +# f2py include dir (for fortranobject.h) is not in dependency('numpy'), +# so add it separately +f2py_dep = declare_dependency( + include_directories : incdir_f2py, +) + +# Generate the f2py wrappers +fib_source = custom_target('fibmodule.c', + input : ['fib.f90'], + output : ['fibmodule.c', 'fib-f2pywrappers.f'], + command : [py, '-m', 'numpy.f2py', '@INPUT@', '-m', 'fib', '--lower'] +) + +py.extension_module('fib', + ['fib.f90', fib_source, incdir_f2py / 
'fortranobject.c'], + dependencies : [np_dep, f2py_dep], + subdir: 'fib_wrapper', + install : true, +) + +# Install the Python package files +py.install_sources( + 'fib_wrapper/__init__.py', + subdir: 'fib_wrapper', +) diff --git a/doc/source/f2py/code/pyproj_mesonpy.toml b/doc/source/f2py/code/pyproj_mesonpy.toml new file mode 100644 index 000000000000..751882772c8f --- /dev/null +++ b/doc/source/f2py/code/pyproj_mesonpy.toml @@ -0,0 +1,10 @@ +[build-system] +# numpy>=2.0 is required for dependency('numpy') support in meson.build +requires = ["meson-python>=0.15.0", "numpy>=2.0"] +build-backend = "mesonpy" + +[project] +name = "fib_wrapper" +version = "0.1.0" +requires-python = ">=3.10" +dependencies = ["numpy"] diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py deleted file mode 100644 index ef79ad1ecfb6..000000000000 --- a/doc/source/f2py/code/setup_example.py +++ /dev/null @@ -1,16 +0,0 @@ -from numpy.distutils.core import Extension - -ext1 = Extension(name='scalar', - sources=['scalar.f']) -ext2 = Extension(name='fib2', - sources=['fib2.pyf', 'fib1.f']) - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(name='f2py_example', - description="F2PY Users Guide examples", - author="Pearu Peterson", - author_email="pearu@cens.ioc.ee", - ext_modules=[ext1, ext2] - ) -# End of setup_example.py diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index e5746c49e94d..e5df85b93a75 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -22,15 +22,12 @@ following steps: * F2PY compiles all sources and builds an extension module containing the wrappers. - * In building the extension modules, F2PY uses ``meson`` and used to use - ``numpy.distutils`` For different build systems, see :ref:`f2py-bldsys`. + * In building the extension modules, F2PY uses ``meson``. For different + build systems, see :ref:`f2py-bldsys`. .. 
note:: - See :ref:`f2py-meson-distutils` for migration information. - - * Depending on your operating system, you may need to install the Python development headers (which provide the file ``Python.h``) separately. In Linux Debian-based distributions this package should be called ``python3-dev``, @@ -157,9 +154,8 @@ Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: Clearly, this is unexpected, as Fortran typically passes by reference. That the above example worked with ``dtype=float`` is considered accidental. - F2PY provides an ``intent(inplace)`` attribute that modifies the attributes - of an input array so that any changes made by the Fortran routine will be - reflected in the input argument. For example, if one specifies the + F2PY provides an ``intent(inplace)`` attribute that ensures that changes + are copied back to the input argument. For example, if one specifies the ``intent(inplace) a`` directive (see :ref:`f2py-attributes` for details), then the example above would read:: @@ -224,7 +220,7 @@ Fortran code, we can apply the wrapping steps one by one. .. literalinclude:: ./code/fib2.pyf :language: fortran -* Finally, we build the extension module with ``numpy.distutils`` by running: +* Finally, we build the extension module by running: :: diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index 8c68b6e03e2e..ecffd695e05a 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -115,8 +115,9 @@ two notable exceptions: * ``intent(inout)`` array arguments must always be :term:`proper-contiguous ` and have a compatible ``dtype``, otherwise an exception is raised. -* ``intent(inplace)`` array arguments will be changed *in situ* if the argument - has a different type than expected (see the ``intent(inplace)`` +* ``intent(inplace)`` array arguments must be arrays. 
If these have + incompatible order or size, a converted copy is passed in, which is + copied back into the original array on exit (see the ``intent(inplace)`` :ref:`attribute ` for more information). In general, if a NumPy array is :term:`proper-contiguous ` and has diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index ba370d73582b..3ac47b113745 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -392,18 +392,15 @@ The following attributes can be used by F2PY. * ``inplace`` The corresponding argument is considered to be an input/output or *in situ* output argument. ``intent(inplace)`` arguments must be NumPy arrays of a proper - size. If the type of an array is not "proper" or the array is - non-contiguous then the array will be modified in-place to fix the type and - make it contiguous. + size. If the size of an array is not "proper" or the array is + non-contiguous then the routine will be passed a fixed copy of array, + which has the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, so that the + result will be copied back to the original array on exit. .. note:: - Using ``intent(inplace)`` is generally not recommended either. - - For example, when slices have been taken from an ``intent(inplace)`` argument - then after in-place changes, the data pointers for the slices may point to - an unallocated memory area. - + Since copies may be made, ``intent(inplace)`` can be slower than expected. + It is recommended over ``inout``, but not over ``in,out``. * ``out`` The corresponding argument is considered to be a return variable. It is appended to the diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index efcf2bec5266..a1fd38d57b9d 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -101,10 +101,6 @@ Here ```` may also contain signature files. Among other options and ``;`` on Windows. In ``CMake`` this corresponds to using ``$``. 
-``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. For - example, try ``f2py --help-link lapack_opt``. - 3. Building a module ~~~~~~~~~~~~~~~~~~~~ @@ -127,7 +123,7 @@ module is constructed by scanning all Fortran source codes for routine signatures, before proceeding to build the extension module. .. warning:: - From Python 3.12 onwards, ``distutils`` has been removed. Use environment + ``distutils`` has been removed. Use environment variables or native files to interact with ``meson`` instead. See its `FAQ `__ for more information. @@ -135,17 +131,13 @@ Among other options (see below) and options described for previous modes, the fo .. note:: - .. versionchanged:: 1.26.0 - There are now two separate build backends which can be used, ``distutils`` - and ``meson``. Users are **strongly** recommended to switch to ``meson`` - since it is the default above Python ``3.12``. + .. versionchanged:: 2.5.0 + The ``distutils`` backend has been removed. Common build flags: ``--backend `` - Specify the build backend for the compilation process. The supported backends - are ``meson`` and ``distutils``. If not specified, defaults to ``distutils``. - On Python 3.12 or higher, the default is ``meson``. + Legacy option, only ``meson`` is supported. ``--f77flags=`` Specify F77 compiler flags ``--f90flags=`` @@ -165,39 +157,13 @@ Common build flags: Add directory ```` to the list of directories to be searched for ``-l``. -The ``meson`` specific flags are: - -``--dep `` **meson only** +``--dep `` Specify a meson dependency for the module. This may be passed multiple times for multiple dependencies. Dependencies are stored in a list for further processing. Example: ``--dep lapack --dep scalapack`` This will identify "lapack" and "scalapack" as dependencies and remove them from argv, leaving a dependencies list containing ["lapack", "scalapack"]. 
-The older ``distutils`` flags are: - -``--help-fcompiler`` **no meson** - List the available Fortran compilers. -``--fcompiler=`` **no meson** - Specify a Fortran compiler type by vendor. -``--f77exec=`` **no meson** - Specify the path to a F77 compiler -``--f90exec=`` **no meson** - Specify the path to a F90 compiler -``--opt=`` **no meson** - Specify optimization flags -``--arch=`` **no meson** - Specify architecture specific optimization flags -``--noopt`` **no meson** - Compile without optimization flags -``--noarch`` **no meson** - Compile without arch-dependent optimization flags -``link-`` **no meson** - Link the extension module with as defined by - ``numpy_distutils/system_info.py``. E.g. to link with optimized LAPACK - libraries (vecLib on MacOSX, ATLAS elsewhere), use ``--link-lapack_opt``. - See also ``--help-link`` switch. - .. note:: The ``f2py -c`` option must be applied either to an existing ``.pyf`` file @@ -295,39 +261,6 @@ When using ``numpy.f2py`` as a module, the following functions can be invoked. .. automodule:: numpy.f2py :members: -Automatic extension module generation -===================================== - -If you want to distribute your f2py extension module, then you only -need to include the .pyf file and the Fortran code. The distutils -extensions in NumPy allow you to define an extension module entirely -in terms of this interface file. A valid ``setup.py`` file allowing -distribution of the ``add.f`` module (as part of the package -``f2py_examples`` so that it would be loaded as ``f2py_examples.add``) is: - -.. 
code-block:: python - - def configuration(parent_package='', top_path=None) - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_examples',parent_package, top_path) - config.add_extension('add', sources=['add.pyf','add.f']) - return config - - if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - -Installation of the new package is easy using:: - - pip install . - -assuming you have the proper permissions to write to the main site- -packages directory for the version of Python you are using. For the -resulting package to work, you need to create a file named ``__init__.py`` -(in the same directory as ``add.pyf``). Notice the extension module is -defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The -conversion of the .pyf file to a .c file is handled by `numpy.distutils`. - Building with Meson (Examples) ============================== @@ -346,13 +279,13 @@ This example shows how to build the ``add`` extension from the ``add.f`` and ``a files described in the :ref:`f2py-examples` (note that you do not always need a ``.pyf`` file: in many cases ``f2py`` can figure out the annotations by itself). -Project layout: +Project layout:: - f2py_examples/ - meson.build - add.f - add.pyf (optional) - __init__.py (can be empty) + f2py_examples/ + meson.build + add.f + add.pyf (optional) + __init__.py (can be empty) Example ``meson.build``: diff --git a/doc/source/f2py/windows/conda.rst b/doc/source/f2py/windows/conda.rst index 08a79b29dacd..75ee627f8e6d 100644 --- a/doc/source/f2py/windows/conda.rst +++ b/doc/source/f2py/windows/conda.rst @@ -25,10 +25,4 @@ Now we will setup a ``conda`` environment. ``conda`` pulls packages from ``msys2``, however, the UX is sufficiently different enough to warrant a separate discussion. -.. warning:: - As of 30-01-2022, the `MSYS2 binaries`_ shipped with ``conda`` are **outdated** and this approach is **not preferred**. - - - -.. 
_MSYS2 binaries: https://github.com/conda-forge/conda-forge.github.io/issues/1044 diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index ea0af7505ce7..af505ed3d2ba 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -58,8 +58,7 @@ Windows Subsystem for Linux Windows applications, but is significantly more complicated. Conda - Windows support for compilers in ``conda`` is facilitated by pulling MSYS2 - binaries, however these `are outdated`_, and therefore not recommended (as of 30-01-2022). + Windows support for GNU compilers in ``conda`` is provided by `conda-forge`_ channel. PGI Compilers (commercial) Unmaintained but sufficient if an existing license is present. Works @@ -71,12 +70,6 @@ Cygwin (FOSS) Cygwin is meant to compile UNIX software on Windows, instead of building native Windows programs. This means cross compilation is required. -The compilation suites described so far are compatible with the `now -deprecated`_ ``np.distutils`` build backend which is exposed by the F2PY CLI. -Additional build system usage (``meson``, ``cmake``) as described in -:ref:`f2py-bldsys` allows for a more flexible set of compiler -backends including: - Intel oneAPI The newer Intel compilers (``ifx``, ``icx``) are based on LLVM and can be used for native compilation. Licensing requirements can be onerous. @@ -214,7 +207,7 @@ path using a hash. This needs to be added to the ``PATH`` variable. .. _JeanHeyd Meneide: https://thephd.dev/binary-banshees-digital-demons-abi-c-c++-help-me-god-please .. _legacy version of Flang: https://github.com/flang-compiler/flang .. _native Windows support: https://developer.nvidia.com/nvidia-hpc-sdk-downloads#collapseFour -.. _are outdated: https://github.com/conda-forge/conda-forge.github.io/issues/1044 +.. _conda-forge: https://conda-forge.org/docs/maintainer/infrastructure/#compilers-supplied-by-conda-forge .. _now deprecated: https://github.com/numpy/numpy/pull/20875 .. 
_LLVM Flang: https://releases.llvm.org/11.0.0/tools/flang/docs/ReleaseNotes.html .. _SciPy's documentation: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index ae2ab6ea4247..838963eb9ccc 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -472,7 +472,7 @@ Glossary Strides are computed automatically from an array's dtype and shape, but can be directly specified using - :doc:`as_strided. ` + :doc:`as_strided `. For details, see :doc:`numpy.ndarray.strides `. diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 80821c2c08fa..d38043661a52 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -338,6 +338,66 @@ NumPy provides several hooks that classes can customize: `, results will *not* be written to the object returned by :func:`__array__`. This practice will return ``TypeError``. + **Example** + + Use ``__array__`` to create a diagonal array of fixed size and value: + + >>> import numpy as np + >>> class DiagonalArray: + ... def __init__(self, N, value): + ... self._N = N + ... self._i = value + ... def __repr__(self): + ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" + ... def __array__(self, dtype=None, copy=None): + ... if copy is False: + ... raise ValueError( + ... "`copy=False` isn't supported. A copy is always created." + ... ) + ... return self._i * np.eye(self._N, dtype=dtype) + + Our custom array can be instantiated like: + + >>> arr = DiagonalArray(5, 1) + >>> arr + DiagonalArray(N=5, value=1) + + We can convert to a numpy array using :func:`numpy.array` or + :func:`numpy.asarray`, which will call its ``__array__`` method to obtain a + standard ``numpy.ndarray``. 
+ + >>> np.asarray(arr) + array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]]) + + Using ``dtype`` should return an appropriate ndarray or raise an error: + + >>> np.asarray(arr, dtype=np.float32) + array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]], dtype=float32) + + If we operate on ``arr`` with a numpy function, numpy will again use the + ``__array__`` interface to convert it to an array and then apply the function + in the usual way. + + >>> np.multiply(arr, 2) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 2., 0., 0.], + [0., 0., 0., 2., 0.], + [0., 0., 0., 0., 2.]]) + + Notice that the return type is a standard ``numpy.ndarray``. + + >>> type(np.multiply(arr, 2)) + <class 'numpy.ndarray'> + + .. _matrix-objects: Matrix objects @@ -479,16 +539,16 @@ Example: >>> import numpy as np - >>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) + >>> a = np.memmap('newfile.dat', dtype=np.float64, mode='w+', shape=1000) >>> a[10] = 10.0 >>> a[30] = 30.0 >>> del a - >>> b = np.fromfile('newfile.dat', dtype=float) + >>> b = np.fromfile('newfile.dat', dtype=np.float64) >>> print(b[10], b[30]) 10.0 30.0 - >>> a = np.memmap('newfile.dat', dtype=float) + >>> a = np.memmap('newfile.dat', dtype=np.float64) >>> print(a[10], a[30]) 10.0 30.0 @@ -508,6 +568,10 @@ Character arrays (:mod:`numpy.char`) `dtype` `object_`, `bytes_` or `str_`, and use the free functions in the `numpy.char` module for fast vectorized string operations. +.. deprecated:: 2.5 + ``numpy.char.chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + These are enhanced arrays of either :class:`str_` type or :class:`bytes_` type. 
These arrays inherit from the :class:`ndarray`, but specially-define the operations ``+``, ``*``, diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index c0ec6a572c83..1d8201bbce1b 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -124,10 +124,10 @@ datetime type with generic units. >>> import numpy as np - >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64') + >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype=np.datetime64) array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]') - >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype='datetime64') + >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype=np.datetime64) array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'], dtype='datetime64[ms]') @@ -284,6 +284,8 @@ Datetime units The Datetime and Timedelta data types support a large number of time units, as well as generic units which can be coerced into any of the other units based on input data. +The generic units are deprecated since NumPy 2.5 +and will raise an error in the future. Migration guidance is provided in the `migration guide for deprecation of generic units`_ section below. Datetimes are always stored with an epoch of 1970-01-01T00:00. This means the supported dates are @@ -345,6 +347,12 @@ The protocol is described in the following table: Generic units `datetime.date` ``int`` ================================ ================================= ================================== + +.. deprecated:: 2.5 + The generic units of `timedelta64` are deprecated since NumPy 2.5 and + will raise an error in the future. + + .. admonition:: Example .. try_examples:: @@ -635,3 +643,46 @@ given below. A 472, by Stephenson et.al. `_. A sensible estimate is `50491112870 Âą 90` seconds, with a difference of 10330 seconds. + + +.. 
_migration_guide_generic_units: + +Migration guide for deprecation of generic units +================================================ + +The generic units of `timedelta64` are deprecated since NumPy 2.5 +and will raise an error in the future. +This section provides guidance on how to update code +that uses generic units of `timedelta64` to avoid future errors. + +The straightforward way is to replace the generic unit with a specific time unit such as 'D' (day), 'h' (hour), 'm' (minute), 's' (second), etc. The choice of the specific time unit will depend on the context of your code and the level of precision you require. + + +.. admonition:: Example + + .. try_examples:: + + >>> import numpy as np + + >>> # Old code using generic units of timedelta64 + >>> np.timedelta64(5, "s") + 1 + DeprecationWarning: The 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. This includes implicit conversion of bare integers (e.g. `+ 1`).Please use a specific unit instead. + + >>> # Updated code using a specific time unit + >>> np.timedelta64(5, "s") + np.timedelta64(1, "s") + np.timedelta64(6,'s') + + + When comparing `timedelta64` objects, make sure to use the same specific time unit for both operands even if they are representing ``0``. + + >>> np.timedelta64(0, "s") == 0 + DeprecationWarning: The 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. This includes implicit conversion of bare integers (e.g. `== 0`).Please use a specific unit instead. + np.True_ + + >>> np.timedelta64(0, "s") == np.timedelta64(0, "s") + np.True_ + + When using ``numpy.testing.assert_allclose`` to compare `timedelta64` objects, make sure to pass a value with a specific time unit to the ``atol`` parameter as well. 
+ + >>> arr = np.ones(5, dtype='m8[s]') + >>> np.testing.assert_allclose(arr, np.timedelta64(1, "s"), atol=np.timedelta64(0, "s")) diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index fcb3e122e6de..b1887fc09d23 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -258,21 +258,29 @@ Array-protocol type strings (see :ref:`arrays.interface`) which represents boolean. The item size must correspond to an existing type, or an error will be raised. The supported kinds are - ================= ======================== - ``'?'``, ``'b1'`` boolean - ``'b'`` (signed) byte - ``'B'`` unsigned byte - ``'i'`` (signed) integer - ``'u'`` unsigned integer - ``'f'`` floating-point - ``'c'`` complex-floating point - ``'m'`` timedelta - ``'M'`` datetime - ``'O'`` (Python) objects - ``'S'``, ``'a'`` zero-terminated bytes (not recommended) - ``'U'`` Unicode string - ``'V'`` raw data (:class:`void`) - ================= ======================== + ================== ======================== + ``'?'`` boolean + ``'b'`` (signed) byte + ``'B'`` unsigned byte + ``'h'`` (signed) short + ``'H'`` unsigned short + ``'i'`` (signed) integer + ``'I'`` unsigned integer + ``'l'`` (signed) long integer + ``'L'`` unsigned long integer + ``'q'`` (signed) long long integer + ``'Q'`` unsigned long long integer + ``'f'`` single precision + ``'F'`` complex single precision + ``'d'`` double precision + ``'D'`` complex double precision + ``'g'`` long precision + ``'G'`` complex long double precision + ``'O'`` (Python) objects + ``'S'`` zero-terminated bytes (not recommended) + ``'U'`` Unicode string + ``'V'`` raw data (:class:`void`) + ================== ======================== .. admonition:: Example @@ -286,15 +294,6 @@ Array-protocol type strings (see :ref:`arrays.interface`) >>> dt = np.dtype('S25') # 25-length zero-terminated bytes >>> dt = np.dtype('U25') # 25-character string - .. _string-dtype-note: - - .. 
admonition:: Note on string types - - For backward compatibility with existing code originally written to support - Python 2, ``S`` and ``a`` typestrings are zero-terminated bytes. - For unicode strings, use ``U``, `numpy.str_`. For signed bytes that do not - need zero-termination ``b`` or ``i1`` can be used. - String with comma-separated fields A short-hand notation for specifying the format of a structured data type is a comma-separated string of basic formats. @@ -561,7 +560,7 @@ This equivalence can only be handled through ``==``, not through ``is``. >>> import numpy as np - >>> a = np.array([1, 2], dtype=float) + >>> a = np.array([1, 2], dtype=np.float64) >>> a.dtype == np.dtype(np.float64) True >>> a.dtype == np.float64 diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 4dca5b541a38..17922bbb7a44 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -618,3 +618,27 @@ Utility method for typing: :toctree: generated/ ndarray.__class_getitem__ + +.. _arrays.ndarray.pattern-matching: + +Structural pattern matching +=========================== + +Arrays support :pep:`structural pattern matching <634>`. The array is matched +as a sequence, so you can unpack arrays along the first dimension in +``match``/``case`` statements:: + + >>> arr = np.array([[1, 2], [3, 4]]) + >>> match arr: + ... case [row1, row2]: + ... print(f"row1={row1}, row2={row2}") + row1=[1 2], row2=[3 4] + +Nested patterns work too, matching inner dimensions:: + + >>> match arr: + ... case [[a, b], [c, d]]: + ... print(f"a={a}, b={b}, c={c}, d={d}") + a=1, b=2, c=3, d=4 + +All ndarray subclasses inherit this behavior. 
diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst index d2dead0ce7b5..32e503383217 100644 --- a/doc/source/reference/arrays.promotion.rst +++ b/doc/source/reference/arrays.promotion.rst @@ -79,10 +79,10 @@ their precision when determining the result dtype. This is often convenient. For instance, when working with arrays of a low precision dtype, it is usually desirable for simple operations with Python scalars to preserve the dtype. - >>> arr_float32 = np.array([1, 2.5, 2.1], dtype="float32") + >>> arr_float32 = np.array([1, 2.5, 2.1], dtype=np.float32) >>> arr_float32 + 10.0 # undesirable to promote to float64 array([11. , 12.5, 12.1], dtype=float32) - >>> arr_int16 = np.array([3, 5, 7], dtype="int16") + >>> arr_int16 = np.array([3, 5, 7], dtype=np.int16) >>> arr_int16 + 10 # undesirable to promote to int64 array([13, 15, 17], dtype=int16) @@ -130,7 +130,7 @@ overflows: ... RuntimeWarning: overflow encountered in scalar add Note that NumPy warns when overflows occur for scalars, but not for arrays; -e.g., ``np.array(100, dtype="uint8") + 100`` will *not* warn. +e.g., ``np.array(100, dtype=np.uint8) + 100`` will *not* warn. Numerical promotion ------------------- diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index f859db4620d4..10223456bb6c 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -327,8 +327,6 @@ elements the data type consists of.) .. warning:: - See :ref:`Note on string types`. - Numeric Compatibility: If you used old typecode characters in your Numeric code (which was never recommended), you will need to change some of them to the new characters. 
In particular, the needed diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index d07a00ebde73..92b57daf04c9 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -841,6 +841,35 @@ cannot not be accessed directly. The shape (always C-style contiguous) of the sub-array as a Python tuple. +.. c:function:: char PyDataType_TYPE(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + Data type character code. See `numpy.dtype.char`. Only set for built-in and + legacy user DTypes. Null character (``b'\x00'``) otherwise. + +.. c:function:: char PyDataType_KIND(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + Data type kind character code. See `numpy.dtype.kind`. Only set for built-in + and legacy user DTypes. Null character (``b'\x00'``) otherwise. + +.. c:function:: char PyDataType_BYTEORDER(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + Data type byteorder character code. One of ``'='`` (native), ``'<'`` + (little-endian), ``'>'`` (big-endian), or ``'|'`` (not applicable). See + `numpy.dtype.byteorder`. + +.. c:function:: PyTypeObject *PyDataType_TYPEOBJ(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + The type object for the scalar type. See the ``typeobj`` member of the + ``PyArray_Descr`` struct. See :c:data:`PyArray_Descr` for a full description + of the ``PyArray_Descr`` struct layout. Data-type checking ~~~~~~~~~~~~~~~~~~ @@ -1786,9 +1815,9 @@ the functions that must be implemented for each slot. - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct identity otherwise as it preserves the sign for ``sum([-0.0])``. - We use no identity for object, but return the default of ``0`` and - ``1`` for the empty ``sum([], dtype=object)`` and - ``prod([], dtype=object)``. + ``1`` for the empty ``sum([], dtype=np.object_)`` and + ``prod([], dtype=np.object_)``. 
+ This allows ``np.sum(np.array(["a", "b"], dtype=np.object_))`` to work. - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least ``INT_MIN`` not a good *default* when there are no items. @@ -1899,6 +1928,9 @@ with the rest of the ArrayMethod API. entry points, ``(module ':')? (object '.')* name``, with ``numpy`` the default module. Examples: ``sin``, ``strings.str_len``, ``numpy.strings:str_len``. + Note that some names are supported but do not directly correspond + to ufuncs: ``"sort"``, ``"argsort"``, ``"real"``, ``"imag"``. + (These do use ufunc-likes or even ufuncs internally.) .. c:member:: PyArrayMethod_Spec *spec @@ -1911,7 +1943,9 @@ with the rest of the ArrayMethod API. Add multiple loops to ufuncs from ArrayMethod specs. This also handles the registration of methods for the ufunc-like functions - ``sort`` and ``argsort``. See :ref:`array-methods-sorting` for details. + ``sort`` and ``argsort`` (see :ref:`array-methods-sorting` for details), + as well as for the array attributes ``.real`` and ``.imag`` needed + for user defined complex DTypes (with ``"real"`` and ``"imag"`` as names). The ``slots`` argument must be a NULL-terminated array of `PyUFunc_LoopSlot` (see above), which give the name of the @@ -1942,6 +1976,22 @@ with the rest of the ArrayMethod API. attempt a search for a new loop or promoter that can accomplish the operation by casting the inputs to the "promoted" DTypes. + A promoter should honor ``signature[]`` (if set). A promoter must return ``-1`` + on failure. A Python error may be set but is not required (a general error is + set in either paths, although the original error is chained). + A promoter must return ``0`` or ``1`` on success. NumPy normally checks that + ``new_op_dtypes`` are different from ``op_dtypes`` to prevent recursion. + This check is skipped if the promoter returns ``1``, which allows the promoter + to add a new loop (when adding a new loop, ``new_op_dtypes`` should be identical + to ``op_dtypes``). + + .. 
versionchanged:: 2.5 + After 2.5 a return of ``1`` indicates that the promoter was successful + skipping a recursion protection step. + This mainly allows the promoter to add new loop to the ufunc that must + now match instead of the promoter itself. + (Normally, a promoter must modify the DTypes help find the right loop.) + .. c:function:: int PyUFunc_GiveFloatingpointErrors( \ const char *name, int fpe_errors) @@ -2254,12 +2304,24 @@ Shape Manipulation a different total number of elements then the old shape. If reallocation is necessary, then *self* must own its data, have *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be - referenced by any other array. The fortran argument can be - :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, or - :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually it - could be used to determine how the resize operation should view the data - when constructing a differently-dimensioned array. Returns None on success - and NULL on error. + referenced by any other array. The *fortran* argument has no effect. + + On Python 3.13 and older, the check allows uniquely referenced objects and + objects with exactly one reference to be reallocated in-place. On Python + 3.14 and newer, the array must be uniquely referenced. See the Python 3.14 + `What's New entry + `_ on + this topic for more information on why there is a behavior difference. + + Reallocating arrays in-place can often lead to memory fragmentation and + should be avoided. If the goal is to reclaim over-allocated memory, + alternatives are to create a view or a copy of just the desired data, or + using two passes to build the array: one to cheaply determine the shape and + another to allocate and fill. Benchmark your use case to determine what is + optimum. You may be surprised to find ``resize`` actually slows down or + bloats your application. + + Returns None on success and NULL on error. .. 
c:function:: PyObject* PyArray_Transpose( \ PyArrayObject* self, PyArray_Dims* permute) @@ -2365,7 +2427,7 @@ Item selection and manipulation Return an array with the items of ``self`` sorted along ``axis``. The array is sorted using an algorithm whose properties are specified by the value of - ``kind``, an integer/enum specifying the reguirements of the sorting + ``kind``, an integer/enum specifying the requirements of the sorting algorithm used. If ``self* ->descr`` is a data-type with fields defined, then ``self->descr->names`` is used to determine the sort order. A comparison where the first field is equal will use the second field and so on. To @@ -2382,7 +2444,7 @@ Item selection and manipulation Return an array of indices such that selection of these indices along the given ``axis`` would return a sorted version of ``self``. The array is sorted using an algorithm whose properties are specified by ``kind``, an - integer/enum specifying the reguirements of the sorting algorithm used. If + integer/enum specifying the requirements of the sorting algorithm used. If ``self->descr`` is a data-type with fields defined, then ``self->descr->names`` is used to determine the sort order. A comparison where the first field is equal will use the second field and so on. To @@ -4261,9 +4323,9 @@ Memory management .. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj) If ``obj->flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function - clears the flags, `DECREF` s - `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. It then - copies ``obj->data`` to `obj->base->data`, and returns the error state of + clears the flags, ``DECREF`` s + ``obj->base`` and makes it writeable, and sets ``obj->base`` to NULL. It then + copies ``obj->data`` to ``obj->base->data``, and returns the error state of the copy operation. This is the opposite of :c:func:`PyArray_SetWritebackIfCopyBase`. 
Usually this is called once you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called @@ -4272,6 +4334,8 @@ Memory management Returns 0 if nothing was done, -1 on error, and 1 if action was taken. +.. _array.ndarray.capi.threading: + Threading support ~~~~~~~~~~~~~~~~~ @@ -4497,9 +4561,9 @@ Miscellaneous Macros If ``obj->flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function clears the flags, `DECREF` s - `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. In + ``obj->base`` and makes it writeable, and sets ``obj->base`` to NULL. In contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes no attempt - to copy the data from `obj->base`. This undoes + to copy the data from ``obj->base``. This undoes :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an error when you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called multiple times, or with ``NULL`` input. diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index cc46ba744a49..b2e3af4c0944 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -317,20 +317,6 @@ The generic steps to take are: machine. Otherwise you pick up a static library built for the wrong architecture. -When you build with ``numpy.distutils`` (deprecated), then use this in your ``setup.py``: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> from numpy.distutils.misc_util import get_info - >>> info = get_info('npymath') - >>> _ = config.add_extension('foo', sources=['foo.c'], extra_info=info) - -In other words, the usage of ``info`` is exactly the same as when using -``blas_info`` and co. 
- When you are building with `Meson `__, use:: # Note that this will get easier in the future, when Meson has diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index 755ab2141cbd..7a436b102600 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -4,6 +4,8 @@ Generalized universal function API ================================== +.. seealso:: :ref:`ufuncs` + There is a general need for looping over not only functions on scalars but also over functions on vectors (or arrays). This concept is realized in NumPy by generalizing the universal functions @@ -61,7 +63,7 @@ distances among them. The output dimension ``p`` must therefore be equal to in an output array of the right size. If the size of a core dimension of an output cannot be determined from a passed in input or output array, an error will be raised. This can be changed by defining a ``PyUFunc_ProcessCoreDimsFunc`` function -and assigning it to the ``proces_core_dims_func`` field of the ``PyUFuncObject`` +and assigning it to the ``process_core_dims_func`` field of the ``PyUFuncObject`` structure. See below for more details. 
Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index a039af130860..2cd423fdd30a 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -985,7 +985,7 @@ PyUFunc_Type and PyUFuncObject PyUFuncGenericFunction *functions; void **data; int ntypes; - int reserved1; + int _ufunc_flags; const char *name; char *types; const char *doc; diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst index 79d758bddada..00a2d607b356 100644 --- a/doc/source/reference/constants.rst +++ b/doc/source/reference/constants.rst @@ -8,7 +8,7 @@ NumPy includes several constants: .. data:: e - Euler's constant, base of natural logarithms, Napier's constant. + Euler's number, base of natural logarithms, Napier's constant. ``e = 2.71828182845904523536028747135266249775724709369995...`` diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst deleted file mode 100644 index 714c8836322e..000000000000 --- a/doc/source/reference/distutils.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. _numpy-distutils-refguide: - -********* -Packaging -********* - -.. module:: numpy.distutils - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - -.. warning:: - - Note that ``setuptools`` does major releases often and those may contain - changes that break :mod:`numpy.distutils`, which will *not* be updated anymore - for new ``setuptools`` versions. It is therefore recommended to set an - upper version bound in your build configuration for the last known version - of ``setuptools`` that works with your build. 
- -NumPy provides enhanced distutils functionality to make it easier to -build and install sub-packages, auto-generate code, and extension -modules that use Fortran-compiled libraries. A useful :class:`Configuration -` class is also provided in -:mod:`numpy.distutils.misc_util` that can make it easier to construct -keyword arguments to pass to the setup function (by passing the -dictionary obtained from the todict() method of the class). More -information is available in the :ref:`distutils-user-guide`. - -The choice and location of linked libraries such as BLAS and LAPACK as well as -include paths and other such build options can be specified in a ``site.cfg`` -file located in the NumPy root repository or a ``.numpy-site.cfg`` file in your -home directory. See the ``site.cfg.example`` example file included in the NumPy -repository or sdist for documentation. - -.. index:: - single: distutils - - -Modules in :mod:`numpy.distutils` -================================= -.. toctree:: - :maxdepth: 2 - - distutils/misc_util - - -.. currentmodule:: numpy.distutils - -.. autosummary:: - :toctree: generated/ - - ccompiler - ccompiler_opt - cpuinfo.cpu - core.Extension - exec_command - log.set_verbosity - system_info.get_info - system_info.get_standard_file - - -Configuration class -=================== - -.. currentmodule:: numpy.distutils.misc_util - -.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) - - Construct a configuration instance for the given package name. If - *parent_name* is not None, then construct the package as a - sub-package of the *parent_name* package. If *top_path* and - *package_path* are None then they are assumed equal to - the path of the file this instance was created in. The setup.py - files in the numpy distribution are good examples of how to use - the :class:`Configuration` instance. - - .. automethod:: todict - - .. automethod:: get_distribution - - .. automethod:: get_subpackage - - .. 
automethod:: add_subpackage - - .. automethod:: add_data_files - - .. automethod:: add_data_dir - - .. automethod:: add_include_dirs - - .. automethod:: add_headers - - .. automethod:: add_extension - - .. automethod:: add_library - - .. automethod:: add_scripts - - .. automethod:: add_installed_library - - .. automethod:: add_npy_pkg_config - - .. automethod:: paths - - .. automethod:: get_config_cmd - - .. automethod:: get_build_temp_dir - - .. automethod:: have_f77c - - .. automethod:: have_f90c - - .. automethod:: get_version - - .. automethod:: make_svn_version_py - - .. automethod:: make_config_py - - .. automethod:: get_info - -Building installable C libraries -================================ - -Conventional C libraries (installed through `add_library`) are not installed, and -are just used during the build (they are statically linked). An installable C -library is a pure C library, which does not depend on the python C runtime, and -is installed such that it may be used by third-party packages. To build and -install the C library, you just use the method `add_installed_library` instead of -`add_library`, which takes the same arguments except for an additional -``install_dir`` argument:: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - -npy-pkg-config files --------------------- - -To make the necessary build options available to third parties, you could use -the `npy-pkg-config` mechanism implemented in `numpy.distutils`. This mechanism is -based on a .ini file which contains all the options. 
A .ini file is very -similar to .pc files as used by the pkg-config unix utility:: - - [meta] - Name: foo - Version: 1.0 - Description: foo library - - [variables] - prefix = /home/user/local - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -Generally, the file needs to be generated during the build, since it needs some -information known at build time only (e.g. prefix). This is mostly automatic if -one uses the `Configuration` method `add_npy_pkg_config`. Assuming we have a -template file foo.ini.in as follows:: - - [meta] - Name: foo - Version: @version@ - Description: foo library - - [variables] - prefix = @prefix@ - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -and the following code in setup.py:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - >>> subst = {'version': '1.0'} - >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst) - -This will install the file foo.ini into the directory package_dir/lib, and the -foo.ini file will be generated from foo.ini.in, where each ``@version@`` will be -replaced by ``subst_dict['version']``. The dictionary has an additional prefix -substitution rule automatically added, which contains the install prefix (since -this is not easy to get from setup.py). - -Reusing a C library from another package ----------------------------------------- - -Info are easily retrieved from the `get_info` function in -`numpy.distutils.misc_util`:: - - >>> info = np.distutils.misc_util.get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=info) - - - -An additional list of paths to look for .ini files can be given to `get_info`. - -Conversion of ``.src`` files -============================ - -NumPy distutils supports automatic conversion of source files named -.src. 
This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. See :ref:`templating`. diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst deleted file mode 100644 index bbb83a5ab061..000000000000 --- a/doc/source/reference/distutils/misc_util.rst +++ /dev/null @@ -1,7 +0,0 @@ -distutils.misc_util -=================== - -.. automodule:: numpy.distutils.misc_util - :members: - :undoc-members: - :exclude-members: Configuration diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst deleted file mode 100644 index 0a815797ac30..000000000000 --- a/doc/source/reference/distutils_guide.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _distutils-user-guide: - -``numpy.distutils`` user guide -============================== - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - - -.. include:: ../../DISTUTILS.rst - :start-line: 6 diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 366b0e67f06a..e4ca4fedcf81 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,16 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been deprecated in NumPy ``1.23.0``. 
It will be removed -for Python 3.12; for Python <= 3.11 it will not be removed until 2 years after -the Python 3.12 release (Oct 2025). - - -.. warning:: - - ``numpy.distutils`` is only tested with ``setuptools < 60.0``, newer - versions may break. See :ref:`numpy-setuptools-interaction` for details. - +``numpy.distutils`` was removed in NumPy ``2.5.0``. Migration advice ---------------- @@ -27,7 +18,7 @@ using a well-designed, modern and reliable build system, we recommend: If you have modest needs (only simple Cython/C extensions; no need for Fortran, BLAS/LAPACK, nested ``setup.py`` files, or other features of -``numpy.distutils``) and have been happy with ``numpy.distutils`` so far, you +``numpy.distutils``) and have been happy with ``numpy.distutils``, you can also consider switching to ``setuptools``. Note that most functionality of ``numpy.distutils`` is unlikely to be ported to ``setuptools``. @@ -47,7 +38,7 @@ migrating. For more details about the SciPy migration, see: - `RFC: switch to Meson as a build system `__ - `Tracking issue for Meson support `__ -NumPy will migrate to Meson for the 1.26 release. +NumPy migrated to Meson for the 1.26 release. Moving to CMake / scikit-build @@ -73,15 +64,12 @@ present in ``setuptools``: - Support for a few other scientific libraries, like FFTW and UMFPACK - Better MinGW support - Per-compiler build flag customization (e.g. `-O3` and `SSE2` flags are default) -- a simple user build config system, see `site.cfg.example `__ +- a simple user build config system, see `site.cfg.example `__ - SIMD intrinsics support - Support for the NumPy-specific ``.src`` templating format for ``.c``/``.h`` files -The most widely used feature is nested ``setup.py`` files. This feature may -perhaps still be ported to ``setuptools`` in the future (it needs a volunteer -though, see `gh-18588 `__ for -status). Projects only using that feature could move to ``setuptools`` after -that is done. 
In case a project uses only a couple of ``setup.py`` files, it +The most widely used feature is nested ``setup.py`` files. In case a project +uses only a couple of ``setup.py`` files, it also could make sense to simply aggregate all the content of those files into a single ``setup.py`` file and then move to ``setuptools``. This involves dropping all ``Configuration`` instances, and using ``Extension`` instead. @@ -100,29 +88,6 @@ E.g.,:: For more details, see the `setuptools documentation `__ - -.. _numpy-setuptools-interaction: - -Interaction of ``numpy.distutils`` with ``setuptools`` ------------------------------------------------------- - -It is recommended to use ``setuptools < 60.0``. Newer versions may work, but -are not guaranteed to. The reason for this is that ``setuptools`` 60.0 enabled -a vendored copy of ``distutils``, including backwards incompatible changes that -affect some functionality in ``numpy.distutils``. - -If you are using only simple Cython or C extensions with minimal use of -``numpy.distutils`` functionality beyond nested ``setup.py`` files (its most -popular feature, see :class:`Configuration `), -then latest ``setuptools`` is likely to continue working. In case of problems, -you can also try ``SETUPTOOLS_USE_DISTUTILS=stdlib`` to avoid the backwards -incompatible changes in ``setuptools``. - -Whatever you do, it is recommended to put an upper bound on your ``setuptools`` -build requirement in ``pyproject.toml`` to avoid future breakage - see -:ref:`for-downstream-package-authors`. - - .. _CMake: https://cmake.org/ .. _Meson: https://mesonbuild.com/ .. 
_meson-python: https://meson-python.readthedocs.io diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index aa6c692d6b2b..2a7ac83a96ca 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -40,7 +40,6 @@ Python API :maxdepth: 1 typing - distutils C API ===== @@ -63,7 +62,6 @@ Other topics security testing distutils_status_migration - distutils_guide swig diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 98e3dda54e7b..5c6d8139b055 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -44,7 +44,6 @@ Prefer not to use these namespaces for new code. There are better alternatives and/or this code is deprecated or isn't reliable. - :ref:`numpy.char ` - legacy string functionality, only for fixed-width strings -- :ref:`numpy.distutils ` (deprecated) - build system support - :ref:`numpy.f2py ` - Fortran binding generation (usually used from the command line only) - :ref:`numpy.ma ` - masked arrays (not very reliable, needs an overhaul) - :ref:`numpy.matlib ` (pending deprecation) - functions supporting ``matrix`` instances @@ -70,7 +69,6 @@ and/or this code is deprecated or isn't reliable. 
numpy.rec numpy.version numpy.char - numpy.distutils numpy.f2py <../f2py/index> numpy.ma numpy.matlib diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index f59a2182052b..6da0a8c4e0a0 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -168,9 +168,9 @@ Features Parallel Applications Multithreaded Generation - new-or-different + New or Different Comparing Performance - c-api + C API Examples of using Numba, Cython, CFFI Original Source of the Generator and BitGenerators diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 73d2fc9ee5ad..28e045f10dc0 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -9,6 +9,9 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created using the common constructors such as :meth:`numpy.empty` will satisfy these requirements. +.. seealso:: + :ref:`thread_safety` for general information about thread safety in NumPy. + This example makes use of :mod:`concurrent.futures` to fill an array using multiple threads. Threads are long-lived so that repeated calls do not require any additional overheads from thread creation. diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst index 92c605071e50..7dfb2e6a18e3 100644 --- a/doc/source/reference/routines.char.rst +++ b/doc/source/reference/routines.char.rst @@ -9,9 +9,8 @@ Legacy fixed-width string functionality .. legacy:: - The string operations in this module, as well as the `numpy.char.chararray` - class, are planned to be deprecated in the future. Use `numpy.strings` - instead. + The string operations in this module are planned to be deprecated in the future, and + the `numpy.char.chararray` class is deprecated in NumPy 2.5. Use `numpy.strings` instead. 
The `numpy.char` module provides a set of vectorized string operations for arrays of type `numpy.str_` or `numpy.bytes_`. For example diff --git a/doc/source/reference/routines.emath.rst b/doc/source/reference/routines.emath.rst index 7751c922b677..4b4ee208734f 100644 --- a/doc/source/reference/routines.emath.rst +++ b/doc/source/reference/routines.emath.rst @@ -15,6 +15,7 @@ domains of the input. For example, for functions like `log` with branch cuts, the versions in this module provide the mathematically valid answers in the complex plane:: + >>> import numpy as np >>> import math >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) True diff --git a/doc/source/reference/routines.err.rst b/doc/source/reference/routines.err.rst index 5272073a3b00..f46634793fa3 100644 --- a/doc/source/reference/routines.err.rst +++ b/doc/source/reference/routines.err.rst @@ -1,8 +1,80 @@ +.. _fp_error_handling: + Floating point error handling ============================= .. currentmodule:: numpy +Error handling settings are stored in :py:mod:`python:contextvars` +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`. + +.. _misc-error-handling: + +How numpy handles numerical exceptions +-------------------------------------- + +The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` +and ``'ignore'`` for ``underflow``. But this can be changed, and it can be +set individually for different kinds of exceptions. The different behaviors +are: + +- ``'ignore'`` : Take no action when the exception occurs. +- ``'warn'`` : Print a :py:exc:`RuntimeWarning` (via the Python :py:mod:`warnings` module). +- ``'raise'`` : Raise a :py:exc:`FloatingPointError`. +- ``'call'`` : Call a specified function. +- ``'print'`` : Print a warning directly to ``stdout``. +- ``'log'`` : Record error in a Log object. 
+ +These behaviors can be set for all kinds of errors or specific ones: + +- ``all`` : apply to all numeric exceptions +- ``invalid`` : when NaNs are generated +- ``divide`` : divide by zero (for integers as well!) +- ``overflow`` : floating point overflows +- ``underflow`` : floating point underflows + +Note that integer divide-by-zero is handled by the same machinery. + +The error handling mode can be configured using the :func:`numpy.errstate` +context manager. + +Examples +-------- + +:: + + >>> with np.errstate(all='warn'): + ... np.zeros(5, dtype=np.float32) / 0.0 + :2: RuntimeWarning: invalid value encountered in divide + array([nan, nan, nan, nan, nan], dtype=float32) + +:: + + >>> with np.errstate(under='ignore'): + ... np.array([1.e-100])**10 + array([0.]) + +:: + + >>> with np.errstate(invalid='raise'): + ... np.sqrt(np.array([-1.])) + ... + Traceback (most recent call last): + File "", line 2, in + np.sqrt(np.array([-1.])) + ~~~~~~~^^^^^^^^^^^^^^^^^ + FloatingPointError: invalid value encountered in sqrt + +:: + + >>> def errorhandler(errstr, errflag): + ... print("saw stupid error!") + >>> with np.errstate(call=errorhandler, all='call'): + ... np.zeros(5, dtype=np.int32) / 0 + saw stupid error! + array([nan, nan, nan, nan, nan]) + Setting and getting error handling ---------------------------------- diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst index 2b8dd98f36a4..ccd4467af545 100644 --- a/doc/source/reference/routines.io.rst +++ b/doc/source/reference/routines.io.rst @@ -59,8 +59,15 @@ Memory mapping files memmap lib.format.open_memmap +.. _text_formatting_options: + Text formatting options ----------------------- + +Text formatting settings are maintained in a :py:mod:`context variable <contextvars>`, +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`. + +.. 
autosummary:: :toctree: generated/ diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 2b1b5dac1710..c29ccc4a5f24 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -247,8 +247,8 @@ Conversion operations ma.masked_where -> to a ndarray -~~~~~~~~~~~~~~ +> to an ndarray +~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ diff --git a/doc/source/reference/security.rst b/doc/source/reference/security.rst index 6d8ba75b9d26..a0de8f9e0146 100644 --- a/doc/source/reference/security.rst +++ b/doc/source/reference/security.rst @@ -3,8 +3,6 @@ NumPy security Security issues can be reported privately as described in the project README and when opening a `new issue on the issue tracker `_. -The `Python security reporting guidelines `_ -are a good resource and its notes apply also to NumPy. NumPy's maintainers are not security experts. However, we are conscientious about security and experts of both the NumPy codebase and how it's used. @@ -14,17 +12,31 @@ A security advisory we are not aware of beforehand can lead to a lot of work for all involved parties. -Advice for using NumPy on untrusted data ---------------------------------------- +Important +--------- +NumPy is not designed to be exposed directly to untrusted users. A user who can freely execute NumPy (or Python) functions must be considered -to have the same privilege as the process/Python interpreter. +to have the same privileges as the process/Python interpreter. + +If one can already execute Python code, there are far worse things one can do +than use all available CPU cycles, or provoke a symptom of a bug in code, like +use-after-free or a segfault. Therefore, while such issues may be bugs, they +are not security issues. + +Before reporting a security issue, please consider and describe the attack +vector in detail - and in particular whether that attack vector assumes being +able to freely execute NumPy functions. 
+ + +Advice for using NumPy on untrusted data +---------------------------------------- -That said, NumPy should be generally safe to use on *data* provided by +NumPy should be generally safe to use on *data* provided by unprivileged users and read through safe API functions (e.g. loaded from a text file or ``.npy`` file without pickle support). Malicious *values* or *data sizes* should never lead to privilege escalation. -Note that the above refers to array data. We do not currently consider for +Note that the above refers to *array data*. We do not currently consider for example ``f2py`` to be safe: it is typically used to compile a program that is then run. Any ``f2py`` invocation must thus use the same privilege as the later execution. diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 229a9ebbae0a..8532ee307fa1 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -1,3 +1,5 @@ +.. _cpu-build-options: + ***************** CPU Build Options ***************** diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst index 84590bfac39c..b07419259690 100644 --- a/doc/source/reference/thread_safety.rst +++ b/doc/source/reference/thread_safety.rst @@ -5,7 +5,7 @@ Thread Safety ************* NumPy supports use in a multithreaded context via the `threading` module in the -standard library. Many NumPy operations release the GIL, so unlike many +standard library. Many NumPy operations release the :term:`python:GIL`, so unlike many situations in Python, it is possible to improve parallel performance by exploiting multithreaded parallelism in Python. @@ -22,15 +22,27 @@ are not reproducible, let alone correct. It is also possible to crash the Python interpreter by, for example, resizing an array while another thread is reading from it to compute a ufunc operation. 
-In the future, we may add locking to ndarray to make writing multithreaded +In the future, we may add locking to :class:`~numpy.ndarray` to make writing multithreaded algorithms using NumPy arrays safer, but for now we suggest focusing on read-only access of arrays that are shared between threads, or adding your own locking if you need both mutation and multithreading. Note that operations that *do not* release the GIL will see no performance gains from use of the `threading` module, and instead might be better served with -`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do -not release the GIL. +`multiprocessing`. In particular, operations on arrays with ``dtype=np.object_`` +do not release the GIL. + +Context-local state +------------------- + +NumPy maintains some state for ufuncs on a context-local basis, which means each +thread in a multithreaded program or task in an asyncio program has its own +independent configuration of the `numpy.errstate` (see +:doc:`/reference/routines.err`), and of :ref:`text_formatting_options`. + +You can update state stored in a context variable by entering a context manager. +As soon as the context manager exits, the state will be reset to its value +before entering the context manager. Free-threaded Python -------------------- @@ -40,12 +52,27 @@ Free-threaded Python Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support for python runtimes with the GIL disabled. See https://py-free-threading.github.io for more information about installing and -using free-threaded Python, as well as information about supporting it in -libraries that depend on NumPy. - -Because free-threaded Python does not have a global interpreter lock to -serialize access to Python objects, there are more opportunities for threads to -mutate shared state and create thread safety issues. 
In addition to the -limitations about locking of the ndarray object noted above, this also means -that arrays with ``dtype=object`` are not protected by the GIL, creating data -races for python objects that are not possible outside free-threaded python. +using :py:term:`free-threaded ` Python, as well as +information about supporting it in libraries that depend on NumPy. + +Because free-threaded Python does not have a +global interpreter lock to serialize access to Python objects, there are more +opportunities for threads to mutate shared state and create thread safety +issues. In addition to the limitations about locking of the +:class:`~numpy.ndarray` object noted above, this also means that arrays with +``dtype=np.object_`` are not protected by the GIL, creating data races for python +objects that are not possible outside free-threaded python. + +C-API Threading Support +----------------------- + +For developers writing C extensions that interact with NumPy, several parts of +the :doc:`C-API array documentation ` provide detailed +information about multithreading considerations. + +See Also +-------- + +* :doc:`/reference/random/multithreading` - Practical example of using NumPy's + random number generators in a multithreaded context with + :mod:`concurrent.futures`. diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 0c675718818b..cac15b66cf14 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -19,6 +19,10 @@ that takes a fixed number of specific inputs and produces a fixed number of specific outputs. For detailed information on universal functions, see :ref:`ufuncs-basics`. + +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of only single-element scalars. 
+ :class:`ufunc` ============== diff --git a/doc/source/release.rst b/doc/source/release.rst index 190d0512faeb..5736b4bd7ffc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,11 @@ Release notes .. toctree:: :maxdepth: 2 + 2.5.0 + 2.4.4 + 2.4.3 + 2.4.2 + 2.4.1 2.4.0 2.3.5 2.3.4 diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index f6fe84a4b17f..4700e37203ce 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -27,11 +27,11 @@ Details of these improvements can be found below. Build System Changes ==================== -* Numpy now uses ``setuptools`` for its builds instead of plain distutils. +* NumPy now uses ``setuptools`` for its builds instead of plain distutils. This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of - projects that depend on Numpy (see gh-6551). It potentially affects the way - that build/install methods for Numpy itself behave though. Please report any - unexpected behavior on the Numpy issue tracker. + projects that depend on NumPy (see gh-6551). It potentially affects the way + that build/install methods for NumPy itself behave though. Please report any + unexpected behavior on the NumPy issue tracker. * Bento build support and related files have been removed. * Single file build support and related files have been removed. @@ -39,7 +39,7 @@ Build System Changes Future Changes ============== -The following changes are scheduled for Numpy 1.12.0. +The following changes are scheduled for NumPy 1.12.0. * Support for Python 2.6, 3.2, and 3.3 will be dropped. * Relaxed stride checking will become the default. See the 1.8.0 release @@ -61,7 +61,7 @@ The following changes are scheduled for Numpy 1.12.0. In a future release the following changes will be made. * The ``rand`` function exposed in ``numpy.testing`` will be removed. 
That - function is left over from early Numpy and was implemented using the + function is left over from early NumPy and was implemented using the Python random module. The random number generators from ``numpy.random`` should be used instead. * The ``ndarray.view`` method will only allow c_contiguous arrays to be @@ -124,7 +124,7 @@ non-integers for degree specification. *np.dot* now raises ``TypeError`` instead of ``ValueError`` ----------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior mimics that of other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. @@ -194,7 +194,7 @@ New Features * ``f2py.compile`` has a new ``extension`` keyword parameter that allows the fortran extension to be specified for generated temp files. For instance, - the files can be specifies to be ``*.f90``. The ``verbose`` argument is + the files can be specified to be ``*.f90``. The ``verbose`` argument is also activated, it was previously ignored. * A ``dtype`` parameter has been added to ``np.random.randint`` @@ -254,7 +254,7 @@ Memory and speed improvements for masked arrays ----------------------------------------------- Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses ``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and -avoid a big memory peak. Another optimization was done to avoid a memory +avoids a big memory peak. Another optimization was done to avoid a memory peak and useless computations when printing a masked array. ``ndarray.tofile`` now uses fallocate on linux @@ -304,13 +304,13 @@ Instead, ``np.broadcast`` can be used in all cases. 
``np.trace`` now respects array subclasses ------------------------------------------ -This behaviour mimics that of other functions such as ``np.diagonal`` and +This behavior mimics that of other functions such as ``np.diagonal`` and ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give the same result. ``np.dot`` now raises ``TypeError`` instead of ``ValueError`` ------------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior is now consistent with other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst index 07e06ca6e043..7a387629fe46 100644 --- a/doc/source/release/1.16.0-notes.rst +++ b/doc/source/release/1.16.0-notes.rst @@ -271,7 +271,7 @@ via the services of shippable.com. Appending to build flags ------------------------ -`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and +``numpy.distutils`` has always overridden rather than appended to `LDFLAGS` and other similar such environment variables for compiling Fortran extensions. Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the behavior will be appending. This applied to: `LDFLAGS`, `F77FLAGS`, diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index a90dbb7a67d9..43d2cdedf4b6 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -350,7 +350,7 @@ and load will be addressed in a future release. 
``numpy.distutils`` append behavior changed for LDFLAGS and similar ------------------------------------------------------------------- -`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and +``numpy.distutils`` has always overridden rather than appended to ``LDFLAGS`` and other similar such environment variables for compiling Fortran extensions. Now the default behavior has changed to appending - which is the expected behavior in most situations. To preserve the old (overwriting) behavior, set the diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index a2276ac5016d..298d417bb0c2 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -735,7 +735,7 @@ checking. Negation of user defined BLAS/LAPACK detection order ---------------------------------------------------- -`~numpy.distutils` allows negation of libraries when determining BLAS/LAPACK +``~numpy.distutils`` allows negation of libraries when determining BLAS/LAPACK libraries. This may be used to remove an item from the library resolution phase, i.e. to disallow NetLIB libraries one could do: diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst index 29a7e5ce6073..b6afff63f5f1 100644 --- a/doc/source/release/2.4.0-notes.rst +++ b/doc/source/release/2.4.0-notes.rst @@ -4,16 +4,725 @@ NumPy 2.4.0 Release Notes ========================== +The NumPy 2.4.0 release continues the work to improve free threaded Python +support, user dtypes implementation, and annotations. There are many expired +deprecations and bug fixes as well. + +This release supports Python versions 3.11-3.14 + Highlights ========== -*We'll choose highlights for this release near the end of the release cycle.* +Apart from annotations and ``same_value`` kwarg, the 2.4 highlights are mostly +of interest to downstream developers. They should help in implementing new user +dtypes. + +* Many annotation improvements. 
In particular, runtime signature introspection. + +* New ``casting`` kwarg ``'same_value'`` for casting by value. + +* New ``PyUFunc_AddLoopsFromSpec`` function that can be used to add user sort + loops using the ``ArrayMethod`` API. + +* New ``__numpy_dtype__`` protocol. + +Deprecations +============ + +Setting the ``strides`` attribute is deprecated +----------------------------------------------- +Setting the strides attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view (no copy) via: + +* ``np.lib.stride_tricks.strided_window_view`` if applicable, +* ``np.lib.stride_tricks.as_strided`` for the general case, +* or the ``np.ndarray`` constructor (``buffer`` is the original array) for a + light-weight version. + +(`gh-28925 `__) + +Positional ``out`` argument to ``np.maximum``, ``np.minimum`` is deprecated +--------------------------------------------------------------------------- +Passing the output array ``out`` positionally to ``numpy.maximum`` and +``numpy.minimum`` is deprecated. For example, ``np.maximum(a, b, c)`` will emit +a deprecation warning, since ``c`` is treated as the output buffer rather than +a third input. + +Always pass the output with the keyword form, e.g. ``np.maximum(a, b, +out=c)``. This makes intent clear and simplifies type annotations. + +(`gh-29052 `__) + +``align=`` must be passed as boolean to ``np.dtype()`` +------------------------------------------------------ +When creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be given if +``align=`` is not a boolean. This is mainly to prevent accidentally passing a +subarray align flag where it has no effect, such as ``np.dtype("f8", 3)`` +instead of ``np.dtype(("f8", 3))``. We strongly suggest to always pass +``align=`` as a keyword argument. 
+ +(`gh-29301 `__) + +Assertion and warning control utilities are deprecated +------------------------------------------------------ +``np.testing.assert_warns`` and ``np.testing.suppress_warnings`` are +deprecated. Use ``warnings.catch_warnings``, ``warnings.filterwarnings``, +``pytest.warns``, or ``pytest.filterwarnings`` instead. + +(`gh-29550 `__) + +``np.fix`` is pending deprecation +--------------------------------- +The ``numpy.fix`` function will be deprecated in a future release. It is +recommended to use ``numpy.trunc`` instead, as it provides the same +functionality of truncating decimal values to their integer parts. Static type +checkers might already report a warning for the use of ``numpy.fix``. + +(`gh-30168 `__) + +in-place modification of ``ndarray.shape`` is pending deprecation +----------------------------------------------------------------- +Setting the ``ndarray.shape`` attribute directly will be deprecated in a future +release. Instead of modifying the shape in place, it is recommended to use the +``numpy.reshape`` function. Static type checkers might already report a +warning for assignments to ``ndarray.shape``. + +(`gh-30282 `__) + +Deprecation of ``numpy.lib.user_array.container`` +------------------------------------------------- +The ``numpy.lib.user_array.container`` class is deprecated and will be removed +in a future version. + +(`gh-30284 `__) + + +Expired deprecations +==================== + +Removed deprecated ``MachAr`` runtime discovery mechanism. +---------------------------------------------------------- + +(`gh-29836 `__) + +Raise ``TypeError`` on attempt to convert array with ``ndim > 0`` to scalar +--------------------------------------------------------------------------- +Conversion of an array with ``ndim > 0`` to a scalar was deprecated in NumPy +1.25. Now, attempting to do so raises ``TypeError``. Ensure you extract a +single element from your array before performing this operation. 
+ +(`gh-29841 `__) + +Removed numpy.linalg.linalg and numpy.fft.helper +------------------------------------------------ +The following were deprecated in NumPy 2.0 and have been moved to private +modules: + +* ``numpy.linalg.linalg`` + Use ``numpy.linalg`` instead. + +* ``numpy.fft.helper`` + Use ``numpy.fft`` instead. + +(`gh-29909 `__) + +Removed ``interpolation`` parameter from quantile and percentile functions +-------------------------------------------------------------------------- +The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been +removed from the following functions: + +* ``numpy.percentile`` +* ``numpy.nanpercentile`` +* ``numpy.quantile`` +* ``numpy.nanquantile`` + +Use the ``method`` parameter instead. + +(`gh-29973 `__) + +Removed ``numpy.in1d`` +---------------------- +``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. + +(`gh-29978 `__) + +Removed ``numpy.ndindex.ndincr()`` +---------------------------------- +The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now +removed; use ``next(ndindex)`` instead. + +(`gh-29980 `__) + +Removed ``fix_imports`` parameter from ``numpy.save`` +----------------------------------------------------- +The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. +This flag has been ignored since NumPy 1.17 and was only needed to support +loading files in Python 2 that were written in Python 3. 
+ +(`gh-29984 `__) + +Removal of four undocumented ``ndarray.ctypes`` methods +------------------------------------------------------- +Four undocumented methods of the ``ndarray.ctypes`` object have been removed: + +* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) + +These methods have been deprecated since NumPy 1.21. + +(`gh-29986 `__) + +Removed ``newshape`` parameter from ``numpy.reshape`` +----------------------------------------------------- +The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been +removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` +on newer NumPy versions. + +(`gh-29994 `__) + +Removal of deprecated functions and arguments +--------------------------------------------- +The following long-deprecated APIs have been removed: + +* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or + ``scipy.integrate`` functions instead. + +* ``disp`` function — deprecated from 2.0 release and no longer functional. Use + your own printing function instead. + +* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect + since NumPy 1.10. + +(`gh-29997 `__) + +Removed ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` +------------------------------------------------------------------------- +The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been +removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. + +(`gh-30021 `__) + +``numpy.array2string`` and ``numpy.sum`` deprecations finalized +--------------------------------------------------------------- +The following long-deprecated APIs have been removed or converted to errors: + +* The ``style`` parameter has been removed from ``numpy.array2string``. 
+  This argument had no effect since NumPy 1.14.0. Any arguments following
+  it, such as ``formatter``, have now been made keyword-only.
+
+* Calling ``np.sum(generator)`` directly on a generator object now raises a
+  ``TypeError``. This behavior was deprecated in NumPy 1.15.0. Use
+  ``np.sum(np.fromiter(generator))`` or the Python ``sum`` builtin instead.
+
+(`gh-30068 `__)
+
+
+Compatibility notes
+===================
+
+* NumPy's C extension modules have begun to use multi-phase initialisation, as
+  defined by PEP 489. As part of this, a new explicit check has been added that
+  each such module is only imported once per Python process. This comes with
+  the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing
+  it will now fail with an ``ImportError``. This has always been unsafe, with
+  unexpected side-effects, though did not previously raise an error.
+
+  (`gh-29030 `__)
+
+* ``numpy.round`` now always returns a copy. Previously, it returned a view
+  for integer inputs for ``decimals >= 0`` and a copy in all other cases.
+  This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``.
+
+  (`gh-29137 `__)
+
+* Type-checkers will no longer accept calls to ``numpy.arange`` with
+  ``start`` as a keyword argument. This was done for compatibility with
+  the Array API standard. At runtime it is still possible to use
+  ``numpy.arange`` with ``start`` as a keyword argument.
+
+  (`gh-30147 `__)
+
+* The macro ``NPY_ALIGNMENT_REQUIRED`` has been removed. The macro was defined
+  in the ``npy_cpu.h`` file, so might be regarded as semi public. As it turns
+  out, with modern compilers and hardware it is almost always the case that
+  alignment is required, so NumPy no longer uses the macro. It is unlikely
+  anyone uses it, but you might want to compile with the ``-Wundef`` flag or
+  equivalent to be sure. 
+ + (`gh-29094 `__) + + +C API changes +============= + +The NPY_SORTKIND enum has been enhanced with new variables +---------------------------------------------------------- +This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. +We have changed the semantics of the old names in the ``NPY_SORTKIND`` enum and +added new ones. The changes are backward compatible, and no recompilation is +needed. The new names of interest are: + +* ``NPY_SORT_DEFAULT`` -- default sort (same value as ``NPY_QUICKSORT``) +* ``NPY_SORT_STABLE`` -- the sort must be stable (same value as ``NPY_MERGESORT``) +* ``NPY_SORT_DESCENDING`` -- the sort must be descending + +The semantic change is that ``NPY_HEAPSORT`` is mapped to ``NPY_QUICKSORT`` when used. +Note that ``NPY_SORT_DESCENDING`` is not yet implemented. + +(`gh-29642 `__) + +New ``NPY_DT_get_constant`` slot for DType constant retrieval +------------------------------------------------------------- +A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing +dtype implementations to provide constant values such as machine limits and +special values. The slot function has the signature:: + + int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) + +It returns 1 on success, 0 if the constant is not available, or -1 on error. +The function is always called with the GIL held and may write to unaligned memory. + +Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, +while floating-point constants return values of the dtype's native type. + +Implementing this can be used by user DTypes to provide ``numpy.finfo`` values. + +(`gh-29836 `__) + +A new ``PyUFunc_AddLoopsFromSpecs`` convenience function has been added to the C API. +------------------------------------------------------------------------------------- +This function allows adding multiple ufunc loops from their specs in one call +using a NULL-terminated array of ``PyUFunc_LoopSlot`` structs. 
It allows +registering sorting and argsorting loops using the new ArrayMethod API. + +(`gh-29900 `__) + + +New Features +============ + +* Let ``np.size`` accept multiple axes. + + (`gh-29240 `__) + +* Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. + + (`gh-29273 `__) + +``'same_value'`` for casting by value +------------------------------------- +The ``casting`` kwarg now has a ``'same_value'`` option that checks the actual +values can be round-trip cast without changing value. Currently it is only +implemented in ``ndarray.astype``. This will raise a ``ValueError`` if any of the +values in the array would change as a result of the cast, including rounding of +floats or overflowing of ints. + +(`gh-29129 `__) + +``StringDType`` fill_value support in ``numpy.ma.MaskedArray`` +-------------------------------------------------------------- +Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` +when using the variable‑width ``StringDType`` (kind ``'T'``), including through +slicing and views. The default is ``'N/A'`` and may be overridden by any valid +string. This fixes issue `gh‑29421 `__ +and was implemented in pull request `gh‑29423 `__. + +(`gh-29423 `__) + +``ndmax`` option for ``numpy.array`` +------------------------------------ +The ``ndmax`` option is now available for ``numpy.array``. +It explicitly limits the maximum number of dimensions created from nested sequences. + +This is particularly useful when creating arrays of list-like objects with ``dtype=object``. +By default, NumPy recurses through all nesting levels to create the highest possible +dimensional array, but this behavior may not be desired when the intent is to preserve +nested structures as objects. The ``ndmax`` parameter provides explicit control over +this recursion depth. + +.. 
code-block:: python + + # Default behavior: Creates a 2D array + >>> a = np.array([[1, 2], [3, 4]], dtype=object) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + # With ndmax=1: Creates a 1D array + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + +(`gh-29569 `__) + +Warning emitted when using ``where`` without ``out`` +---------------------------------------------------- +Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will +now emit a warning. This usage tends to trip up users who expect some value in +output locations where the mask is ``False`` (the ufunc will not touch those +locations). The warning can be suppressed by using ``out=None``. + +(`gh-29813 `__) + +DType sorting and argsorting supports the ArrayMethod API +--------------------------------------------------------- +User-defined dtypes can now implement custom sorting and argsorting using the +``ArrayMethod`` API. This mechanism can be used in place of the +``PyArray_ArrFuncs`` slots which may be deprecated in the future. + +The sorting and argsorting methods are registered by passing the arraymethod +specs that implement the operations to the new ``PyUFunc_AddLoopsFromSpecs`` +function. See the ``ArrayMethod`` API documentation for details. + +(`gh-29900 `__) + +New ``__numpy_dtype__`` protocol +-------------------------------- +NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check +for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` +or any ``dtype=`` argument. + +Downstream projects are encouraged to implement this for all dtype like +objects which may previously have used a ``.dtype`` attribute that returned +a NumPy dtype. +We expect to deprecate ``.dtype`` in the future to prevent interpreting +array-like objects with a ``.dtype`` attribute as a dtype. 
+If you wish you can implement ``__numpy_dtype__`` to ensure an earlier +warning or error (``.dtype`` is ignored if this is found). + +(`gh-30179 `__) + + +Improvements +============ + +Fix ``flatiter`` indexing edge cases +------------------------------------ + +The ``flatiter`` object now shares the same index preparation logic as +``ndarray``, ensuring consistent behavior and fixing several issues where +invalid indices were previously accepted or misinterpreted. + +Key fixes and improvements: + +* Stricter index validation + + - Boolean non-array indices like ``arr.flat[[True, True]]`` were + incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. + They now raise an index error. Note that indices that match the + iterator's shape are expected to not raise in the future and be + handled as regular boolean indices. Use ``np.asarray()`` if + you want to match that behavior. + - Float non-array indices were also cast to integer and incorrectly + treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now + deprecated and will be removed in a future version. + - 0-dimensional boolean indices like ``arr.flat[True]`` are also + deprecated and will be removed in a future version. + +* Consistent error types: + + Certain invalid ``flatiter`` indices that previously raised ``ValueError`` + now correctly raise ``IndexError``, aligning with ``ndarray`` behavior. + +* Improved error messages: + + The error message for unsupported index operations now provides more + specific details, including explicitly listing the valid index types, + instead of the generic ``IndexError: unsupported index operation``. 
+ +(`gh-28590 `__) + +Improved error handling in ``np.quantile`` +------------------------------------------ +`np.quantile` now raises errors if: + +* All weights are zero +* At least one weight is ``np.nan`` +* At least one weight is ``np.inf`` + +(`gh-28595 `__) + +Improved error message for ``assert_array_compare`` +--------------------------------------------------- +The error message generated by ``assert_array_compare`` which is used by functions +like ``assert_allclose``, ``assert_array_less`` etc. now also includes information +about the indices at which the assertion fails. + +(`gh-29112 `__) + +Show unit information in ``__repr__`` for ``datetime64("NaT")`` +------------------------------------------------------------------ +When a ``datetime64`` object is "Not a Time" (NaT), its ``__repr__`` method now +includes the time unit of the datetime64 type. This makes it consistent with +the behavior of a ``timedelta64`` object. + +(`gh-29396 `__) + +Performance increase for scalar calculations +-------------------------------------------- +The speed of calculations on scalars has been improved by about a factor 6 for +ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed +difference from their ``math`` equivalents from a factor 19 to 3 (the speed +for arrays is left unchanged). + +(`gh-29819 `__) + +``numpy.finfo`` Refactor +------------------------ +The ``numpy.finfo`` class has been completely refactored to obtain floating-point +constants directly from C compiler macros rather than deriving them at runtime. +This provides better accuracy, platform compatibility and corrected +several attribute calculations: + +* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and + ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, + ``DBL_MIN``, etc.), ensuring platform-correct values. + +* The deprecated ``MachAr`` runtime discovery mechanism has been removed. 
+
+* Derived attributes have been corrected to match standard definitions:
+  ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for
+  all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp``
+  follows the C standard definition.
+
+* ``longdouble`` constants, specifically ``smallest_normal``, now follow the
+  C standard definitions on each respective platform.
+
+* Special handling added for PowerPC's IBM double-double format.
+
+* New test suite added in ``test_finfo.py`` to validate all
+  ``finfo`` properties against expected machine arithmetic values for
+  float16, float32, and float64 types.
+
+(`gh-29836 `__)
+
+Multiple axes are now supported in ``numpy.trim_zeros``
+-------------------------------------------------------
+The ``axis`` argument of ``numpy.trim_zeros`` now accepts a sequence; for example
+``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional
+array ``x`` along axes 0 and 1. This fixes issue
+`gh‑29945 `__ and was implemented
+in pull request `gh‑29947 `__.
+
+(`gh-29947 `__)
+
+Runtime signature introspection support has been significantly improved
+-----------------------------------------------------------------------
+Many NumPy functions, classes, and methods that previously raised
+``ValueError`` when passed to ``inspect.signature()`` now return meaningful
+signatures. This improves support for runtime type checking, IDE autocomplete,
+documentation generation, and runtime introspection capabilities across the
+NumPy API. 
+ +Over three hundred classes and functions have been updated in total, including, +but not limited to, core classes such as ``ndarray``, ``generic``, ``dtype``, +``ufunc``, ``broadcast``, ``nditer``, etc., most methods of ``ndarray`` and +scalar types, array constructor functions (``array``, ``empty``, ``arange``, +``fromiter``, etc.), all ``ufuncs``, and many other commonly used functions, +including ``dot``, ``concat``, ``where``, ``bincount``, ``can_cast``, and +numerous others. + +(`gh-30208 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` for string dtypes +----------------------------------------------------------- +The hash-based algorithm for unique extraction provides an order-of-magnitude +speedup on large string arrays. In an internal benchmark with about 1 billion +string elements, the hash-based np.unique completed in roughly 33.5 seconds, +compared to 498 seconds with the sort-based method – about 15× faster for +unsorted unique operations on strings. This improvement greatly reduces the +time to find unique values in very large string datasets. + +(`gh-28767 `__) + +Rewrite of ``np.ndindex`` using ``itertools.product`` +----------------------------------------------------- +The ``numpy.ndindex`` function now uses ``itertools.product`` internally, +providing significant improvements in performance for large iteration spaces, +while maintaining the original behavior and interface. For example, for an +array of shape (50, 60, 90) the NumPy ``ndindex`` benchmark improves +performance by a factor 5.2. + +(`gh-29165 `__) + +Performance improvements to ``np.unique`` for complex dtypes +------------------------------------------------------------ +The hash-based algorithm for unique extraction now also supports +complex dtypes, offering noticeable performance gains. 
+ +In our benchmarks on complex128 arrays with 200,000 elements, +the hash-based approach was about 1.4–1.5× faster +than the sort-based baseline when there were 20% of unique values, +and about 5× faster when there were 0.2% of unique values. + +(`gh-29537 `__) + + +Changes +======= + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit + floating point input data has been improved. + + (`gh-29105 `__) + +``unique_values`` for string dtypes may return unsorted data +------------------------------------------------------------ +np.unique now supports hash‐based duplicate removal for string dtypes. +This enhancement extends the hash-table algorithm to byte strings ('S'), +Unicode strings ('U'), and the experimental string dtype ('T', StringDType). +As a result, calling np.unique() on an array of strings will use +the faster hash-based method to obtain unique values. +Note that this hash-based method does not guarantee that the returned unique values will be sorted. +This also works for StringDType arrays containing None (missing values) +when using equal_nan=True (treating missing values as equal). + +(`gh-28767 `__) + +Modulate dispatched x86 CPU features +------------------------------------ +**IMPORTANT**: The default setting for ``cpu-baseline`` on x86 has been raised +to ``x86-64-v2`` microarchitecture. This can be changed to none during build +time to support older CPUs, though SIMD optimizations for pre-2009 processors +are no longer maintained. + +NumPy has reorganized x86 CPU features into microarchitecture-based groups +instead of individual features, aligning with Linux distribution standards and +Google Highway requirements. 
+ +Key changes: + +* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, + ``X86_V3``, and ``X86_V4`` +* Raised the baseline to ``X86_V2`` +* Improved ``-`` operator behavior to properly exclude successor features that + imply the excluded feature +* Added meson redirections for removed feature names to maintain backward + compatibility +* Removed compiler compatibility workarounds for partial feature support (e.g., + AVX512 without mask operations) +* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi + support + +New Feature Group Hierarchy: + +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs + +.. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +Documentation has been updated with details on using these new feature groups +with the current meson build system. 
+ +(`gh-28896 `__) + +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause memory +corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. + +(`gh-29179 `__) + +``__array_interface__`` with NULL pointer changed +------------------------------------------------- +The array interface now accepts NULL pointers (NumPy will do its own dummy +allocation, though). Previously, these incorrectly triggered an undocumented +scalar path. In the unlikely event that the scalar path was actually desired, +you can (for now) achieve the previous behavior via the correct scalar path by +not providing a ``data`` field at all. + +(`gh-29338 `__) + +``unique_values`` for complex dtypes may return unsorted data +------------------------------------------------------------- +np.unique now supports hash‐based duplicate removal for complex dtypes. This +enhancement extends the hash‐table algorithm to all complex types ('c'), and +their extended precision variants. The hash‐based method provides faster +extraction of unique values but does not guarantee that the result will be +sorted. + +(`gh-29537 `__) + +Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` +------------------------------------------------------------ +It is unlikely that this change will be noticed, but if you do see a change in +execution time or unstable argsort order, that is likely the cause. Please let +us know if there is a performance regression. Congratulate us if it is improved +:) + +(`gh-29642 `__) + +``numpy.typing.DTypeLike`` no longer accepts ``None`` +----------------------------------------------------- +The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of + +.. code-block:: python + + dtype: DTypeLike = None + +it should now be + +.. code-block:: python + + dtype: DTypeLike | None = None + +instead. 
+(`gh-29739 `__) -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility for building with MSVC +and ``setuptools``. Please note that using these static libraries is +discouraged and for existing projects using it, it's best to use it with a +matching compiler toolchain, which is ``clang-cl`` on Windows on Arm. -.. **Content from release note snippets in doc/release/upcoming_changes:** +(`gh-29750 `__) -.. include:: notes-towncrier.rst diff --git a/doc/source/release/2.4.1-notes.rst b/doc/source/release/2.4.1-notes.rst new file mode 100644 index 000000000000..c033a070bd73 --- /dev/null +++ b/doc/source/release/2.4.1-notes.rst @@ -0,0 +1,52 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.1 Release Notes +========================= + +The NumPy 2.4.1 is a patch release that fixes bugs discovered after the +2.4.0 release. In particular, the typo `SeedlessSequence` is preserved to +enable wheels using the random Cython API and built against NumPy < 2.4.0 +to run without errors. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Bill Tompkins + +* Charles Harris +* Joren Hammudoglu +* Marten van Kerkwijk +* Nathan Goldbaum +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#30490 `__: MAINT: Prepare 2.4.x for further development +* `#30503 `__: DOC: ``numpy.select``\ : fix ``default`` parameter docstring... +* `#30504 `__: REV: Revert part of #30164 (#30500) +* `#30506 `__: TYP: ``numpy.select``\ : allow passing array-like ``default``... 
+* `#30507 `__: MNT: use if constexpr for compile-time branch selection +* `#30513 `__: BUG: Fix leak in flat assignment iterator +* `#30516 `__: BUG: fix heap overflow in fixed-width string multiply (#30511) +* `#30523 `__: BUG: Ensure summed weights returned by np.average always are... +* `#30527 `__: TYP: Fix return type of histogram2d +* `#30594 `__: MAINT: avoid passing ints to random functions that take double... +* `#30595 `__: BLD: Avoiding conflict with pygit2 for static build +* `#30596 `__: MAINT: Fix msvccompiler missing error on FreeBSD +* `#30608 `__: BLD: update vendored Meson to 1.9.2 +* `#30620 `__: ENH: use more fine-grained critical sections in array coercion... +* `#30623 `__: BUG: Undo result type change of quantile/percentile but keep... + diff --git a/doc/source/release/2.4.2-notes.rst b/doc/source/release/2.4.2-notes.rst new file mode 100644 index 000000000000..3cb4fc3b1955 --- /dev/null +++ b/doc/source/release/2.4.2-notes.rst @@ -0,0 +1,50 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.2 Release Notes +========================= + +The NumPy 2.4.2 is a patch release that fixes bugs discovered after the +2.4.1 release. Highlights are: + +- Fixes memory leaks +- Updates OpenBLAS to fix hangs + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Tang + +* Joren Hammudoglu +* Kumar Aditya +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Sebastian Berg +* Vikram Kumar + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#30629 `__: MAINT: Prepare 2.4.x for further development +* `#30636 `__: TYP: ``arange``\ : accept datetime strings +* `#30657 `__: MAINT: avoid possible race condition by not touching ``os.environ``... 
+* `#30700 `__: BUG: validate contraction axes in tensordot (#30521) +* `#30701 `__: DOC: __array_namespace__info__: set_module not __module__ (#30679) +* `#30702 `__: BUG: fix free-threaded PyObject layout in replace_scalar_type_names... +* `#30703 `__: TST: fix limited API example in tests for latest Cython +* `#30709 `__: BUG: Fix some bugs found via valgrind (#30680) +* `#30712 `__: MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact +* `#30713 `__: BUG: Fixup the quantile promotion fixup +* `#30736 `__: BUG: fix thread safety of ``array_getbuffer`` (#30667) +* `#30737 `__: backport scipy-openblas version change + diff --git a/doc/source/release/2.4.3-notes.rst b/doc/source/release/2.4.3-notes.rst new file mode 100644 index 000000000000..09986140c2a9 --- /dev/null +++ b/doc/source/release/2.4.3-notes.rst @@ -0,0 +1,52 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.3 Release Notes +========================= + +The NumPy 2.4.3 is a patch release that fixes bugs discovered after the +2.4.2 release. The most user visible fix may be a threading fix for +OpenBLAS on ARM, closing issue #30816. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Antareep Sarkar + +* Charles Harris +* Joren Hammudoglu +* Matthieu Darbois +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Sebastian Berg +* Warren Weckesser +* stratakis + + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#30759 `__: MAINT: Prepare 2.4.x for further development +* `#30827 `__: BUG: Fix some leaks found via LeakSanitizer (#30756) +* `#30841 `__: MAINT: Synchronize 2.4.x submodules with main +* `#30849 `__: TYP: ``matlib``\ : missing extended precision imports +* `#30850 `__: BUG: Fix weak hash function in np.isin(). 
(#30840) +* `#30921 `__: BUG: fix infinite recursion in np.ma.flatten_structured_array... +* `#30922 `__: BUG: Fix buffer overrun in CPU baseline validation (#30877) +* `#30923 `__: BUG: Fix busdaycalendar's handling of a bool array weekmask.... +* `#30924 `__: BUG: Fix reference leaks and NULL pointer dereferences (#30908) +* `#30925 `__: MAINT: fix two minor issues noticed when touching the C API setup +* `#30955 `__: ENH: Test .kind not .char in np.testing.assert_equal (#30879) +* `#30957 `__: BUG: fix type issues in uses if PyDataType macros +* `#30958 `__: MAINT: Don't use vulture 2.15, it has false positives +* `#30973 `__: MAINT: update openblas (#30961) + diff --git a/doc/source/release/2.4.4-notes.rst b/doc/source/release/2.4.4-notes.rst new file mode 100644 index 000000000000..568d2a526127 --- /dev/null +++ b/doc/source/release/2.4.4-notes.rst @@ -0,0 +1,41 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.4 Release Notes +========================= + +The NumPy 2.4.4 is a patch release that fixes bugs discovered after the 2.4.3 +release. It should finally close issue #30816, the OpenBLAS threading problem +on ARM. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Haag + +* Denis Prokopenko + +* Harshith J + +* Koki Watanabe +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum + + +Pull requests merged +==================== + +A total of 7 pull requests were merged for this release. 
+ +* `#30978 `__: MAINT: Prepare 2.4.x for further development +* `#31049 `__: BUG: Add test to reproduce problem described in #30816 (#30818) +* `#31052 `__: BUG: fix FNV-1a 64-bit selection by using NPY_SIZEOF_UINTP (#31035) +* `#31053 `__: BUG: avoid warning on ufunc with where=True and no output +* `#31058 `__: DOC: document caveats of ndarray.resize on 3.14 and newer +* `#31079 `__: TST: fix POWER VSX feature mapping (#30801) +* `#31084 `__: MAINT: numpy.i: Replace deprecated ``sprintf`` with ``snprintf``... diff --git a/doc/source/release/2.5.0-notes.rst b/doc/source/release/2.5.0-notes.rst new file mode 100644 index 000000000000..1c07e859a7b9 --- /dev/null +++ b/doc/source/release/2.5.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.5.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json index 510efcdd2694..823d4a5d1e82 100644 --- a/doc/source/try_examples.json +++ b/doc/source/try_examples.json @@ -1,8 +1,8 @@ { "global_min_height": "400px", "ignore_patterns": [ - "distutils.html*", "reference\/typing.html*", "numpy.__array_namespace_info__.html*" ] } + diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 5f620fa36cef..f1007db45acc 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -779,7 +779,7 @@ You can add the arrays together with the plus sign. 
:: >>> data = np.array([1, 2]) - >>> ones = np.ones(2, dtype=int) + >>> ones = np.ones(2, dtype=np.int_) >>> data + ones array([2, 3]) @@ -1348,7 +1348,7 @@ For example:: With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. - With two or more arguments, return the largest argument. + With two or more ...arguments, return the largest argument. diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst index 6d8e78488e7e..c0dbc8e8fb51 100644 --- a/doc/source/user/basics.copies.rst +++ b/doc/source/user/basics.copies.rst @@ -106,22 +106,6 @@ otherwise. In most cases, the strides can be modified to reshape the array with a view. However, in some cases where the array becomes non-contiguous (perhaps after a :meth:`.ndarray.transpose` operation), the reshaping cannot be done by modifying strides and requires a copy. -In these cases, we can raise an error by assigning the new shape to the -shape attribute of the array. For example:: - - >>> import numpy as np - >>> x = np.ones((2, 3)) - >>> y = x.T # makes the array non-contiguous - >>> y - array([[1., 1.], - [1., 1.], - [1., 1.]]) - >>> z = y.view() - >>> z.shape = 6 - Traceback (most recent call last): - ... - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. Taking the example of another operation, :func:`numpy.ravel` returns a contiguous flattened view of the array wherever possible. On the other hand, diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index 1a7707ee69c9..19fa737d5f8d 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -20,7 +20,7 @@ There are 6 general mechanisms for creating arrays: 6) Use of special library functions (e.g., random) You can use these methods to create ndarrays or :ref:`structured_arrays`. 
-This document will cover general methods for ndarray creation. +This document will cover general methods for ndarray creation. 1) Converting Python sequences to NumPy arrays ============================================== @@ -29,8 +29,8 @@ NumPy arrays can be defined using Python sequences such as lists and tuples. Lists and tuples are defined using ``[...]`` and ``(...)``, respectively. Lists and tuples can define ndarray creation: -* a list of numbers will create a 1D array, -* a list of lists will create a 2D array, +* a list of numbers will create a 1D array, +* a list of lists will create a 2D array, * further nested lists will create higher-dimensional arrays. In general, any array object is called an **ndarray** in NumPy. :: @@ -72,7 +72,7 @@ results, for example:: Notice when you perform operations with two arrays of the same ``dtype``: ``uint32``, the resulting array is the same type. When you -perform operations with different ``dtype``, NumPy will +perform operations with different ``dtype``, NumPy will assign a new type that satisfies all of the array elements involved in the computation, here ``uint32`` and ``int32`` can both be represented in as ``int64``. @@ -88,7 +88,7 @@ you create the array. .. 40 functions seems like a small number, but the routines.array-creation - has ~47. I'm sure there are more. + has ~47. I'm sure there are more. NumPy has over 40 built-in functions for creating arrays as laid out in the :ref:`Array creation routines `. @@ -104,7 +104,7 @@ dimension of the array they create: The 1D array creation functions e.g. :func:`numpy.linspace` and :func:`numpy.arange` generally need at least two inputs, ``start`` and -``stop``. +``stop``. :func:`numpy.arange` creates arrays with regularly incrementing values. Check the documentation for complete information and examples. 
A few @@ -113,7 +113,7 @@ examples are shown:: >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) + >>> np.arange(2, 10, dtype=np.float64) array([2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) @@ -121,8 +121,8 @@ examples are shown:: Note: best practice for :func:`numpy.arange` is to use integer start, end, and step values. There are some subtleties regarding ``dtype``. In the second example, the ``dtype`` is defined. In the third example, the array is -``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error, -the ``stop`` value is sometimes included. +``dtype=np.float64`` to accommodate the step size of ``0.1``. Due to roundoff error, +the ``stop`` value is sometimes included. :func:`numpy.linspace` will create arrays with a specified number of elements, and spaced equally between the specified beginning and end values. For @@ -140,7 +140,7 @@ number of elements and the starting and end point. The previous ------------------------------- The 2D array creation functions e.g. :func:`numpy.eye`, :func:`numpy.diag`, and :func:`numpy.vander` -define properties of special matrices represented as 2D arrays. +define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: @@ -159,7 +159,7 @@ and the rest are 0, as such:: the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. 
The two array creation functions can be helpful while doing linear algebra, as such:: - + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], @@ -197,7 +197,7 @@ routine is helpful in generating linear least squares models, as such:: [ 8, 4, 2, 1], [27, 9, 3, 1], [64, 16, 4, 1]]) - + 3 - general ndarray creation functions -------------------------------------- @@ -205,20 +205,20 @@ The ndarray creation functions e.g. :func:`numpy.ones`, :func:`numpy.zeros`, and :meth:`~numpy.random.Generator.random` define arrays based upon the desired shape. The ndarray creation functions can create arrays with any dimension by specifying how many dimensions -and length along that dimension in a tuple or list. +and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.zeros((2, 3)) - array([[0., 0., 0.], + array([[0., 0., 0.], [0., 0., 0.]]) >>> np.zeros((2, 3, 2)) array([[[0., 0.], [0., 0.], [0., 0.]], - + [[0., 0.], [0., 0.], [0., 0.]]]) @@ -228,7 +228,7 @@ specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.ones((2, 3)) - array([[1., 1., 1.], + array([[1., 1., 1.], [1., 1., 1.]]) >>> np.ones((2, 3, 2)) array([[[1., 1.], @@ -265,11 +265,11 @@ dimension:: >>> import numpy as np >>> np.indices((3,3)) - array([[[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], - [[0, 1, 2], - [0, 1, 2], + array([[[0, 0, 0], + [1, 1, 1], + [2, 2, 2]], + [[0, 1, 2], + [0, 1, 2], [0, 1, 2]]]) This is particularly useful for evaluating functions of multiple dimensions on @@ -322,7 +322,7 @@ arrays into a 4-by-4 array using ``block``:: [ 0., 0., 0., -4.]]) Other routines use similar syntax to join ndarrays. Check the -routine's documentation for further examples and syntax. +routine's documentation for further examples and syntax. 
4) Reading arrays from disk, either from standard or custom formats =================================================================== @@ -330,7 +330,7 @@ routine's documentation for further examples and syntax. This is the most common case of large array creation. The details depend greatly on the format of data on disk. This section gives general pointers on how to handle various formats. For more detailed examples of IO look at -:ref:`How to Read and Write files `. +:ref:`How to Read and Write files `. Standard binary formats ----------------------- @@ -397,4 +397,4 @@ knowledge to interface with C or C++. NumPy is the fundamental library for array containers in the Python Scientific Computing stack. Many Python libraries, including SciPy, Pandas, and OpenCV, use NumPy ndarrays as the common format for data exchange, These libraries can create, -operate on, and work with NumPy arrays. +operate on, and work with NumPy arrays. diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 117d60f85467..8140517903c3 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -12,301 +12,13 @@ arrays, an N-dimensional array distributed across multiple nodes, and `cupy `_ arrays, an N-dimensional array on a GPU. -To get a feel for writing custom array containers, we'll begin with a simple -example that has rather narrow utility but illustrates the concepts involved. +For comprehensive documentation on writing custom array containers, please see: ->>> import numpy as np ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... 
return self._i * np.eye(self._N, dtype=dtype) - -Our custom array can be instantiated like: - ->>> arr = DiagonalArray(5, 1) ->>> arr -DiagonalArray(N=5, value=1) - -We can convert to a numpy array using :func:`numpy.array` or -:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a -standard ``numpy.ndarray``. - ->>> np.asarray(arr) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - -The ``__array__`` method can optionally accept a `dtype` argument. If provided, -this argument specifies the desired data type for the resulting NumPy array. -Your implementation should attempt to convert the data to this `dtype` -if possible. If the conversion is not supported, it's generally best -to fall back to a default type or raise a `TypeError` or `ValueError`. - -Here's an example demonstrating its use with `dtype` specification: - ->>> np.asarray(arr, dtype=np.float32) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]], dtype=float32) - -If we operate on ``arr`` with a numpy function, numpy will again use the -``__array__`` interface to convert it to an array and then apply the function -in the usual way. - ->>> np.multiply(arr, 2) -array([[2., 0., 0., 0., 0.], - [0., 2., 0., 0., 0.], - [0., 0., 2., 0., 0.], - [0., 0., 0., 2., 0.], - [0., 0., 0., 0., 2.]]) - - -Notice that the return type is a standard ``numpy.ndarray``. - ->>> type(np.multiply(arr, 2)) - - -How can we pass our custom array type through this function? Numpy allows a -class to indicate that it would like to handle computations in a custom-defined -way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's -take one at a time, starting with ``__array_ufunc__``. This method covers -:ref:`ufuncs`, a class of functions that includes, for example, -:func:`numpy.multiply` and :func:`numpy.sin`. 
- -The ``__array_ufunc__`` receives: - -- ``ufunc``, a function like ``numpy.multiply`` -- ``method``, a string, differentiating between ``numpy.multiply(...)`` and - variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so - on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``. -- ``inputs``, which could be a mixture of different types -- ``kwargs``, keyword arguments passed to the function - -For this example we will only handle the method ``__call__`` - ->>> from numbers import Number ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented - -Now our custom array type passes through numpy functions. - ->>> arr = DiagonalArray(5, 1) ->>> np.multiply(arr, 3) -DiagonalArray(N=5, value=3) ->>> np.add(arr, 3) -DiagonalArray(N=5, value=4) ->>> np.sin(arr) -DiagonalArray(N=5, value=0.8414709848078965) - -At this point ``arr + 3`` does not work. - ->>> arr + 3 -Traceback (most recent call last): -... 
-TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int' - -To support it, we need to define the Python interfaces ``__add__``, ``__lt__``, -and so on to dispatch to the corresponding ufunc. We can achieve this -conveniently by inheriting from the mixin -:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`. - ->>> import numpy.lib.mixins ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented - ->>> arr = DiagonalArray(5, 1) ->>> arr + 3 -DiagonalArray(N=5, value=4) ->>> arr > 0 -DiagonalArray(N=5, value=True) - -Now let's tackle ``__array_function__``. We'll create dict that maps numpy -functions to our custom variants. - ->>> HANDLED_FUNCTIONS = {} ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. 
A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... # In this case we accept only scalar numbers or DiagonalArrays. -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented -... def __array_function__(self, func, types, args, kwargs): -... if func not in HANDLED_FUNCTIONS: -... return NotImplemented -... # Note: this allows subclasses that don't override -... # __array_function__ to handle DiagonalArray objects. -... if not all(issubclass(t, self.__class__) for t in types): -... return NotImplemented -... return HANDLED_FUNCTIONS[func](*args, **kwargs) -... - -A convenient pattern is to define a decorator ``implements`` that can be used -to add functions to ``HANDLED_FUNCTIONS``. - ->>> def implements(np_function): -... "Register an __array_function__ implementation for DiagonalArray objects." -... def decorator(func): -... HANDLED_FUNCTIONS[np_function] = func -... return func -... return decorator -... - -Now we write implementations of numpy functions for ``DiagonalArray``. -For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that -calls ``numpy.sum(self)``, and the same for ``mean``. - ->>> @implements(np.sum) -... def sum(arr): -... "Implementation of np.sum for DiagonalArray objects" -... return arr._i * arr._N -... ->>> @implements(np.mean) -... def mean(arr): -... "Implementation of np.mean for DiagonalArray objects" -... return arr._i / arr._N -... 
->>> arr = DiagonalArray(5, 1) ->>> np.sum(arr) -5 ->>> np.mean(arr) -0.2 - -If the user tries to use any numpy functions not included in -``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that -this operation is not supported. For example, concatenating two -``DiagonalArrays`` does not produce another diagonal array, so it is not -supported. - ->>> np.concatenate([arr, arr]) -Traceback (most recent call last): -... -TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [] - -Additionally, our implementations of ``sum`` and ``mean`` do not accept the -optional arguments that numpy's implementation does. - ->>> np.sum(arr, axis=0) -Traceback (most recent call last): -... -TypeError: sum() got an unexpected keyword argument 'axis' - - -The user always has the option of converting to a normal ``numpy.ndarray`` with -:func:`numpy.asarray` and using standard numpy from there. - ->>> np.concatenate([np.asarray(arr), np.asarray(arr)]) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.], - [1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - - -The implementation of ``DiagonalArray`` in this example only handles the -``np.sum`` and ``np.mean`` functions for brevity. Many other functions in the -Numpy API are also available to wrap and a full-fledged custom array container -can explicitly support all functions that Numpy makes available to wrap. 
+- :ref:`Interoperability with NumPy ` - the main guide + covering ``__array_ufunc__`` and ``__array_function__`` protocols +- :ref:`Special attributes and methods ` - see + ``class.__array__()`` for documentation and an example implementing the + ``__array__()`` method Numpy provides some utilities to aid testing of custom array containers that implement the ``__array_ufunc__`` and ``__array_function__`` protocols in the diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst index 7481468fe6db..51d126f8183b 100644 --- a/doc/source/user/basics.indexing.rst +++ b/doc/source/user/basics.indexing.rst @@ -54,7 +54,7 @@ and accepts negative indices for indexing from the end of the array. :: It is not necessary to separate each dimension's index into its own set of square brackets. :: - >>> x.shape = (2, 5) # now x is 2-dimensional + >>> x = x.reshape((2, 5)) # now x is 2-dimensional >>> x[1, 3] 8 >>> x[1, -1] diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index b1c115ff1de0..f9f52bfeab8e 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -130,7 +130,7 @@ We can check that ``arr`` and ``new_arr`` share the same data buffer: The ``__array__()`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``__array__()`` method ensures that any NumPy-like object (an array, any +The `__array__() <../reference/arrays.classes.html#numpy.class.\_\_array\_\_>`__ method ensures that any NumPy-like object (an array, any object exposing the array interface, an object whose ``__array__()`` method returns an array or any nested sequence) that implements it can be used as a NumPy array. If possible, this will mean using ``__array__()`` to create a NumPy @@ -149,9 +149,6 @@ is needed. If a class implements the old signature ``__array__(self)``, for ``np.array(a)`` a warning will be raised saying that ``dtype`` and ``copy`` arguments are missing. 
-To see an example of a custom array implementation including the use of -``__array__()``, see :ref:`basics.dispatch`. - The DLPack Protocol ~~~~~~~~~~~~~~~~~~~ @@ -216,7 +213,7 @@ The ``__array_ufunc__`` protocol A :ref:`universal function (or ufunc for short) ` is a “vectorized” wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. The output of the ufunc (and -its methods) is not necessarily a ndarray, if not all input arguments are +its methods) is not necessarily an ndarray, if not all input arguments are ndarrays. Indeed, if any input defines an ``__array_ufunc__`` method, control will be passed completely to that function, i.e., the ufunc is overridden. The ``__array_ufunc__`` method defined on that (non-ndarray) object has access to @@ -286,10 +283,10 @@ Consider the following: >>> type(ser) pandas.core.series.Series -Now, ``ser`` is **not** a ndarray, but because it +Now, ``ser`` is **not** an ndarray, but because it `implements the __array_ufunc__ protocol `__, -we can apply ufuncs to it as if it were a ndarray: +we can apply ufuncs to it as if it were an ndarray: >>> np.exp(ser) 0 2.718282 @@ -475,7 +472,7 @@ Convert a PyTorch CPU tensor to NumPy array: The imported arrays are read-only so writing or operating in-place will fail: - >>> x.flags.writeable + >>> x_np.flags.writeable False >>> x_np[1] = 1 Traceback (most recent call last): @@ -490,16 +487,19 @@ will mean duplicating the memory. Do not do this for very large arrays: .. note:: - Note that GPU tensors can't be converted to NumPy arrays since NumPy doesn't - support GPU devices: + GPU tensors cannot be directly zero-copy converted to NumPy arrays since + NumPy does not support GPU devices. 
However, since DLPack v1, cross-device + copy is supported via the ``device`` parameter: >>> x_torch = torch.arange(5, device='cuda') - >>> np.from_dlpack(x_torch) + >>> np.from_dlpack(x_torch) # fails: implicit device=None means same device Traceback (most recent call last): File "", line 1, in RuntimeError: Unsupported device in DLTensor. + >>> np.from_dlpack(x_torch, device='cpu') # works: explicit copy to CPU + array([0, 1, 2, 3, 4]) - But, if both libraries support the device the data buffer is on, it is + If both libraries support the device the data buffer is on, it is possible to use the ``__dlpack__`` protocol (e.g. PyTorch_ and CuPy_): >>> x_torch = torch.arange(5, device='cuda') diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index d5b6bba8f28d..2a1523ba209b 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -201,16 +201,16 @@ The main way to control how the sequences of strings we have read from the file are converted to other types is to set the ``dtype`` argument. Acceptable values for this argument are: -* a single type, such as ``dtype=float``. +* a single type, such as ``dtype=np.float64``. The output will be 2D with the given dtype, unless a name has been associated with each column with the use of the ``names`` argument - (see below). Note that ``dtype=float`` is the default for + (see below). Note that ``dtype=np.float64`` is the default for :func:`~numpy.genfromtxt`. -* a sequence of types, such as ``dtype=(int, float, float)``. +* a sequence of types, such as ``dtype=(np.int_, np.float64, np.float64)``. * a comma-separated string, such as ``dtype="i4,f8,|U3"``. * a dictionary with two keys ``'names'`` and ``'formats'``. * a sequence of tuples ``(name, type)``, such as - ``dtype=[('A', int), ('B', float)]``. + ``dtype=[('A', np.int_), ('B', np.float64)]``. * an existing :class:`numpy.dtype` object. * the special value ``None``. 
In that case, the type of the columns will be determined from the data @@ -243,7 +243,7 @@ each column. A first possibility is to use an explicit structured dtype, as mentioned previously:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"]) + >>> np.genfromtxt(data, dtype=[(_, np.int_) for _ in "abc"]) array([(1, 2, 3), (4, 5, 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> ndtype=[('a',int), ('b', float), ('c', int)] + >>> ndtype=[('a', np.int_), ('b', np.float64), ('c', np.int_)] >>> names = ["A", "B", "C"] >>> np.genfromtxt(data, names=names, dtype=ndtype) array([(1, 2., 3), (4, 5., 6)], @@ -289,7 +289,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like ``f0``, ``f1`` and so forth:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int)) + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_)) array([(1, 2., 3), (4, 5., 6)], dtype=[('f0', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), names="a") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), names="a") array([(1, 2., 3), (4, 5., 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), defaultfmt="var_%02i") array([(1, 2., 3), (4, 5., 6)], dtype=[('var_00', '>> data = "N/A, 2, 3\n4, ,???" >>> kwargs = dict(delimiter=",", - ... dtype=int, + ... dtype=np.int_, ... names="a,b,c", ... missing_values={0:"N/A", 'b':" ", 2:"???"}, ... 
filling_values={0:0, 'b':0, 2:-999}) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 2a369aaae17c..202561a958a8 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -346,7 +346,7 @@ Simple example - adding an extra attribute to ndarray class InfoArray(np.ndarray): - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + def __new__(subtype, shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard @@ -779,5 +779,3 @@ your function's signature should accept ``**kwargs``. For example: This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. - - diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst index 773fe86c21d2..5c91ab6c0168 100644 --- a/doc/source/user/basics.ufuncs.rst +++ b/doc/source/user/basics.ufuncs.rst @@ -18,6 +18,25 @@ is, a ufunc is a ":term:`vectorized `" wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of single-element scalars. +For example, :func:`numpy.add` is a ufunc that operates element-by-element, +while :func:`numpy.matmul` is a gufunc that operates on vectors/matrices:: + + >>> a = np.arange(6).reshape(3, 2) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.add(a, a) # element-wise addition + array([[ 0, 2], + [ 4, 6], + [ 8, 10]]) + >>> np.matmul(a, a.T) # matrix multiplication (3x2) @ (2x3) -> (3x3) + array([[ 1, 3, 5], + [ 3, 13, 23], + [ 5, 23, 41]]) + In NumPy, universal functions are instances of the :class:`numpy.ufunc` class. 
Many of the built-in functions are implemented in compiled C code. The basic ufuncs operate on scalars, but @@ -35,12 +54,30 @@ One can also produce custom :class:`numpy.ufunc` instances using the Ufunc methods ============= -All ufuncs have four methods. They can be found at -:ref:`ufuncs.methods`. However, these methods only make sense on scalar -ufuncs that take two input arguments and return one output argument. +All ufuncs have 5 methods. 4 reduce-like methods +(:meth:`~numpy.ufunc.reduce`, :meth:`~numpy.ufunc.accumulate`, +:meth:`~numpy.ufunc.reduceat`, :meth:`~numpy.ufunc.outer`) and one +for inplace operations (:meth:`~numpy.ufunc.at`). +See :ref:`ufuncs.methods` for more. However, these methods only make sense on +ufuncs that take two input arguments and return one output argument (so-called +"scalar" ufuncs since the inner loop operates on a single scalar value). Attempting to call these methods on other ufuncs will cause a :exc:`ValueError`. +For example, :func:`numpy.add` takes two inputs and returns one output, +so its methods work:: + + >>> np.add.reduce([1, 2, 3]) + 6 + +But :func:`numpy.divmod` returns two outputs (quotient and remainder), +so calling its methods raises an error:: + + >>> np.divmod.reduce([1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: reduce only supported for functions returning a single value + The reduce-like methods all take an *axis* keyword, a *dtype* keyword, and an *out* keyword, and the arrays must all have dimension >= 1. The *axis* keyword specifies the axis of the array over which the reduction @@ -76,7 +113,7 @@ an integer (or Boolean) data-type and smaller than the size of the >>> x.dtype dtype('int64') - >>> np.multiply.reduce(x, dtype=float) + >>> np.multiply.reduce(x, dtype=np.float64) array([ 0., 28., 80.]) Finally, the *out* keyword allows you to @@ -84,10 +121,10 @@ provide an output array (or a tuple of output arrays for multi-output ufuncs). 
If *out* is given, the *dtype* argument is only used for the internal computations. Considering ``x`` from the previous example:: - >>> y = np.zeros(3, dtype=int) + >>> y = np.zeros(3, dtype=np.int_) >>> y array([0, 0, 0]) - >>> np.multiply.reduce(x, dtype=float, out=y) + >>> np.multiply.reduce(x, dtype=np.float64, out=y) array([ 0, 28, 80]) Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index c699760fdebd..19763f7c2a51 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -159,40 +159,102 @@ work with multidimensional arrays. Notice that Cython is an extension-module generator only. Unlike f2py, it includes no automatic facility for compiling and linking -the extension module (which must be done in the usual fashion). It -does provide a modified distutils class called ``build_ext`` which lets -you build an extension module from a ``.pyx`` source. Thus, you could -write in a ``setup.py`` file: +the extension module. However, many Python build tools have support for Cython. + +Here is an example of how to set up a Python project that contains a Cython +extension. The example uses the `meson-python Python build backend +`_ and `the meson build system +`_. This is the same build system NumPy itself uses. +. + +First, create a file named ``my_extension.pyx``. + +.. code-block:: cython + + cimport numpy as np + + def say_hello(): + print("Hello!") + +This file lives next to a ``__init__.py`` file with the following content: .. 
code-block:: python - from Cython.Distutils import build_ext - from distutils.extension import Extension - from distutils.core import setup - import numpy - - setup(name='mine', description='Nothing', - ext_modules=[Extension('filter', ['filter.pyx'], - include_dirs=[numpy.get_include()])], - cmdclass = {'build_ext':build_ext}) - -Adding the NumPy include directory is, of course, only necessary if -you are using NumPy arrays in the extension module (which is what we -assume you are using Cython for). The distutils extensions in NumPy -also include support for automatically producing the extension-module -and linking it from a ``.pyx`` file. It works so that if the user does -not have Cython installed, then it looks for a file with the same -file-name but a ``.c`` extension which it then uses instead of trying -to produce the ``.c`` file again. - -If you just use Cython to compile a standard Python module, then you -will get a C extension module that typically runs a bit faster than the -equivalent Python module. Further speed increases can be gained by using -the ``cdef`` keyword to statically define C variables. + from .my_extension import say_hello + +Now you need to create two more files to set up the build system. First, a +``meson.build`` file: + +.. code-block:: meson + + project( + 'module_with_extension', + 'c', 'cython', + version: '0.0.1', + license: 'MIT', + ) + + cython = find_program('cython') + py = import('python').find_installation(pure: false) + + numpy_nodepr_api = ['-DNPY_NO_DEPRECATED_API=NPY_2_0_API_VERSION'] + + np_dep = declare_dependency(dependencies: dependency('numpy'), + compile_args: numpy_nodepr_api) + + py.extension_module( + 'my_extension', + 'my_extension.pyx', + dependencies: [np_dep], + install: true, + subdir: 'my_module_with_extension', + ) + + py.install_sources( + '__init__.py', + subdir: 'my_module_with_extension', + ) + +And a ``pyproject.toml`` file with the following content: + +.. 
code-block:: toml + + [build-system] + build-backend = "mesonpy" + requires = [ + "meson-python", + "Cython>=3.0.0", + "numpy", + ] + + [project] + name = "my_module_with_extension" + version = "0.0.1" + license = "MIT" + dependencies = ["numpy"] + +You should then be able to do the following command to build, install, and call +the function defined in the extension from Python: + +.. code-block:: bash + + $ pip install . + $ python -c "from my_module_with_extension import say_hello; say_hello()" + "Hello!" + +Adding a NumPy dependency to your Meson configuration is only necessary +if you are using the NumPy C API in the extension module via ``cimport +numpy`` (which is what we assume you are using Cython for). If you just +use Cython to compile a standard Python module, then you will get a C +extension module that typically runs a bit faster than the equivalent +Python module. Further speed increases can be gained by using the +``cdef`` keyword to statically define C variables. + +See the meson and meson-python documentation for more details on how to +build more complicated extensions. Let's look at two examples we've seen before to see how they might be -implemented using Cython. These examples were compiled into extension -modules using Cython 0.21.1. +implemented using Cython. Complex addition in Cython @@ -763,19 +825,13 @@ Conclusion Using ctypes is a powerful way to connect Python with arbitrary C-code. Its advantages for extending Python include -- clean separation of C code from Python code - - - no need to learn a new syntax except Python and C - - - allows reuse of C code - - - functionality in shared libraries written for other purposes can be - obtained with a simple Python wrapper and search for the library. 
- - -- easy integration with NumPy through the ctypes attribute - -- full argument checking with the ndpointer class factory +* clean separation of C code from Python code +* no need to learn a new syntax except Python and C +* allows reuse of C code +* functionality in shared libraries written for other purposes can be + obtained with a simple Python wrapper and search for the library. +* easy integration with NumPy through the ctypes attribute +* full argument checking with the ndpointer class factory Its disadvantages include diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 09daa95b7875..e5773f8232b8 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -608,7 +608,7 @@ After the above has been installed, it can be imported and used as follows. >>> import npufunc >>> npufunc.logit(0.5) np.float64(0.0) ->>> a = np.linspace(0, 1, 5, dtype="f4") +>>> a = np.linspace(0, 1, 5, dtype=np.float32) >>> npufunc.logit(a) :1: RuntimeWarning: divide by zero encountered in logit array([ -inf, -1.0986123, 0. , 1.0986123, inf], diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index 6d652e3ca67f..a882afa37afd 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -7,7 +7,7 @@ Miscellaneous IEEE 754 floating point special values -------------------------------------- -Special values defined in numpy: nan, inf, +Special values defined in numpy: :data:`~numpy.nan`, :data:`~numpy.inf` NaNs can be used as a poor-man's mask (if you don't care what the original value was) @@ -17,29 +17,39 @@ Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) >>> np.nonzero(myarr == np.nan) (array([], dtype=int64),) + +:: + >>> np.nan == np.nan # is always False! Use special numpy functions instead. False + +:: + >>> myarr[myarr == np.nan] = 0. 
# doesn't work >>> myarr array([ 1., 0., nan, 3.]) + +:: + >>> myarr[np.isnan(myarr)] = 0. # use this instead find >>> myarr array([1., 0., 0., 3.]) -Other related special value functions: :: +Other related special value functions: - isinf(): True if value is inf - isfinite(): True if not nan or inf - nan_to_num(): Map nan to 0, inf to max float, -inf to min float +- :func:`~numpy.isnan` - True if value is nan +- :func:`~numpy.isinf` - True if value is inf +- :func:`~numpy.isfinite` - True if not nan or inf +- :func:`~numpy.nan_to_num` - Map nan to 0, inf to max float, -inf to min float The following corresponds to the usual functions except that nans are excluded -from the results: :: +from the results: - nansum() - nanmax() - nanmin() - nanargmax() - nanargmin() +- :func:`~numpy.nansum` +- :func:`~numpy.nanmax` +- :func:`~numpy.nanmin` +- :func:`~numpy.nanargmax` +- :func:`~numpy.nanargmin` >>> x = np.arange(10.) >>> x[3] = np.nan @@ -47,168 +57,3 @@ from the results: :: nan >>> np.nansum(x) 42.0 - -How numpy handles numerical exceptions --------------------------------------- - -The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` -and ``'ignore'`` for ``underflow``. But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) 
- - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. -These behaviors are set on a per-thread basis. - -Examples --------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - Traceback (most recent call last): - ... - RuntimeWarning: invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - array([0.]) - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - Traceback (most recent call last): - ... - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... print("saw stupid error!") - >>> np.seterrcall(errorhandler) - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - saw stupid error! - array([nan, nan, nan, nan, nan]) - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C ----------------- -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. - - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. - - - Reference counting often difficult to get right. 
- - - getting it wrong leads to memory leaks, and worse, segfaults - -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing shareable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data - a.ctypes.data_as - a.ctypes.shape - a.ctypes.shape_as - a.ctypes.strides - a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. - -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. 
- -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index ef4a0467c706..b611b5000eef 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -163,7 +163,7 @@ The type of the array can also be explicitly specified at creation time: :: - >>> c = np.array([[1, 2], [3, 4]], dtype=complex) + >>> c = np.array([[1, 2], [3, 4]], dtype=np.complex128) >>> c array([[1.+0.j, 2.+0.j], [3.+0.j, 4.+0.j]]) @@ -346,7 +346,7 @@ existing array rather than create a new one. :: >>> rg = np.random.default_rng(1) # create instance of default random number generator - >>> a = np.ones((2, 3), dtype=int) + >>> a = np.ones((2, 3), dtype=np.int_) >>> b = rg.random((2, 3)) >>> a *= 3 >>> a @@ -535,7 +535,7 @@ are given in a tuple separated by commas:: >>> def f(x, y): ... return 10 * x + y ... - >>> b = np.fromfunction(f, (5, 4), dtype=int) + >>> b = np.fromfunction(f, (5, 4), dtype=np.int_) >>> b array([[ 0, 1, 2, 3], [10, 11, 12, 13], @@ -1256,7 +1256,7 @@ set `__: ... A, B = np.meshgrid(x, y) ... C = A + B*1j ... z = np.zeros_like(C) - ... divtime = maxit + np.zeros(z.shape, dtype=int) + ... divtime = maxit + np.zeros(z.shape, dtype=np.int_) ... ... for i in range(maxit): ... z = z**2 + C @@ -1380,8 +1380,8 @@ and then use it as:: The advantage of this version of reduce compared to the normal ufunc.reduce is that it makes use of the :ref:`broadcasting rules ` -in order to avoid creating an argument array the size of the output -times the number of vectors. +in order to avoid creating a temporary array that needs as much memory +as the output array multiplied by the number of vectors. 
Indexing with strings --------------------- diff --git a/doc/ufuncs.rst b/doc/ufuncs.rst index 077195fa59b7..4344ff9ab121 100644 --- a/doc/ufuncs.rst +++ b/doc/ufuncs.rst @@ -78,10 +78,11 @@ If there are object arrays involved then loop->obj gets set to 1. Then there ar loop, then "remainder" DECREF's are needed). Outputs: - - castbuf contains a new reference as the result of the function call. This - gets converted to the type of interest and. This new reference in castbuf - will be DECREF'd by later calls to the function. Thus, only after the - inner most loop do we need to DECREF the remaining references in castbuf. + - castbuf contains a new reference as the result of the function call. + This is converted to the type of interest, and this new reference + in castbuf will be DECREF'd (its reference count decreased) by + later calls to the function. Thus, only after the innermost loop + finishes do we need to DECREF the remaining references in castbuf. 2) The loop function is of a different type: diff --git a/environment.yml b/environment.yml index c5ee0c381bb3..38801afcb52c 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.12 # need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas @@ -24,9 +24,8 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - typing_extensions>=4.5.0 - - mypy=1.18.2 - - orjson # makes mypy faster + - mypy=1.20.0 + - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 - sphinx-copybutton @@ -46,7 +45,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.14.0 + - ruff=0.15.10 - gitpython # Used in some tests - cffi diff --git a/meson.build b/meson.build index 2cb7ce987ad5..a72c5bb02734 100644 --- a/meson.build +++ b/meson.build @@ -25,10 +25,11 @@ if cc.get_id() == 'gcc' error('NumPy requires GCC >= 9.3') endif elif cc.get_id() == 
'msvc' - if not cc.version().version_compare('>=19.20') + if not cc.version().version_compare('>=19.35') error('NumPy requires at least vc142 (default with Visual Studio 2019) ' + \ 'when building with MSVC') endif + add_project_arguments('/experimental:c11atomics', language: 'c') endif if not cy.version().version_compare('>=3.0.6') error('NumPy requires Cython >= 3.0.6') diff --git a/meson.options b/meson.options index e7011a3b2f2e..8ba7c4b79e03 100644 --- a/meson.options +++ b/meson.options @@ -1,45 +1,53 @@ +# BLAS / LAPACK selection option('blas', type: 'string', value: 'auto', - description: 'Option for BLAS library selection. By default, try to find any in the order given by `blas-order`') + description: 'BLAS library to use (default: autodetect based on `blas-order`)') option('lapack', type: 'string', value: 'auto', - description: 'Option for LAPACK library selection. By default, try to find any in the order given by `lapack-order`') + description: 'LAPACK library to use (default: autodetect based on `lapack-order`)') option('allow-noblas', type: 'boolean', value: true, - description: 'If set to true, allow building with (slow!) internal fallback routines') + description: 'Allow building with (slow!) internal fallback routines if no BLAS library is found') option('blas-order', type: 'array', value: ['auto'], - description: 'Order of BLAS libraries to search for. E.g.: mkl,openblas,blis,blas') + description: 'Preferred search order for BLAS libraries (e.g., mkl, openblas, blis, blas)') option('lapack-order', type: 'array', value: ['auto'], - description: 'Order of LAPACK libraries to search for. 
E.g.: mkl,openblas,lapack') + description: 'Preferred search order for LAPACK libraries (e.g., mkl, openblas, lapack)') option('use-ilp64', type: 'boolean', value: false, - description: 'Use ILP64 (64-bit integer) BLAS and LAPACK interfaces') + description: 'Use ILP64 (64-bit integer) BLAS/LAPACK interfaces') option('blas-symbol-suffix', type: 'string', value: 'auto', - description: 'BLAS and LAPACK symbol suffix to use, if any') -option('mkl-threading', type: 'string', value: 'auto', - description: 'MKL threading method, one of: `seq`, `iomp`, `gomp`, `tbb`') -option('disable-svml', type: 'boolean', value: false, - description: 'Disable building against SVML') -option('disable-highway', type: 'boolean', value: false, - description: 'Disables SIMD-optimized operations related to Google Highway') -option('disable-intel-sort', type: 'boolean', value: false, - description: 'Disables SIMD-optimized operations related to Intel x86-simd-sort') + description: 'Symbol suffix for BLAS/LAPACK symbols (if any)') +option('mkl-threading', type: 'combo', value: 'auto', + choices: ['auto', 'seq', 'iomp', 'gomp', 'tbb'], + description: 'Threading backend for MKL') + +# Threading & parallelism option('disable-threading', type: 'boolean', value: false, description: 'Disable threading support (see `NPY_ALLOW_THREADS` docs)') option('enable-openmp', type: 'boolean', value: false, - description: 'Enable building NumPy with openmp support') + description: 'Enable compilation with OpenMP support') + +# CPU optimization / SIMD option('disable-optimization', type: 'boolean', value: false, - description: 'Disable CPU optimized code (dispatch,simd,unroll...)') + description: 'Disable all CPU optimizations (dispatch, SIMD, loop unrolling)') +option('disable-svml', type: 'boolean', value: false, + description: 'Disable use of Intel SVML') +option('disable-highway', type: 'boolean', value: false, + description: 'Disable SIMD-optimized operations related to Google Highway') 
+option('disable-intel-sort', type: 'boolean', value: false, + description: 'Disable SIMD-optimized operations related to Intel x86-simd-sort') option('cpu-baseline', type: 'string', value: 'min', - description: 'Minimal set of required CPU features') + description: 'Minimal set of required CPU features') option('cpu-baseline-detect', type: 'feature', value: 'auto', - description: 'Detect CPU baseline from the compiler flags') + description: 'Detect CPU baseline from the compiler flags') option('cpu-dispatch', type: 'string', value: 'max', - description: 'Dispatched set of additional CPU features') + description: 'Additional CPU features to dispatch to (beyond baseline)') + +# SIMD test options option('test-simd', type: 'array', - value: [ - 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', - 'VSX', 'VSX2', 'VSX3', 'VSX4', - 'NEON', 'ASIMD', - 'VX', 'VXE', 'VXE2', - 'LSX', - ], - description: 'Specify a list of CPU features to be tested against NumPy SIMD interface') + value: [ + 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', + 'VSX', 'VSX2', 'VSX3', 'VSX4', + 'NEON', 'ASIMD', + 'VX', 'VXE', 'VXE2', + 'LSX', + ], + description: 'CPU SIMD feature sets to be tested by the NumPy SIMD test module') option('test-simd-args', type: 'string', value: '', - description: 'Extra args to be passed to the `_simd` module that is used for testing the NumPy SIMD interface') + description: 'Extra arguments passed to the internal `_simd` test module') diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 5478e52cdcea..92d241883795 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -2,21 +2,21 @@ source_root = meson.project_source_root() mod_features = import('features') NEON = mod_features.new( 'NEON', 1, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon.c')[0] ) NEON_FP16 = mod_features.new( 'NEON_FP16', 2, implies: NEON, - test_code: files(source_root + 
'/numpy/distutils/checks/cpu_neon_fp16.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_fp16.c')[0] ) # FMA NEON_VFPV4 = mod_features.new( 'NEON_VFPV4', 3, implies: NEON_FP16, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_vfpv4.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c')[0] ) # Advanced SIMD ASIMD = mod_features.new( 'ASIMD', 4, implies: NEON_VFPV4, detect: {'val': 'ASIMD', 'match': 'NEON.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimd.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimd.c')[0] ) cpu_family = host_machine.cpu_family() if cpu_family == 'aarch64' @@ -37,25 +37,25 @@ endif ASIMDHP = mod_features.new( 'ASIMDHP', 5, implies: ASIMD, args: {'val': '-march=armv8.2-a+fp16', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdhp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdhp.c')[0] ) ## ARMv8.2 dot product ASIMDDP = mod_features.new( 'ASIMDDP', 6, implies: ASIMD, args: {'val': '-march=armv8.2-a+dotprod', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimddp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimddp.c')[0] ) ## ARMv8.2 Single & half-precision Multiply ASIMDFHM = mod_features.new( 'ASIMDFHM', 7, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+fp16fml', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdfhm.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdfhm.c')[0] ) ## Scalable Vector Extensions (SVE) SVE = mod_features.new( 'SVE', 8, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+sve', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_sve.c')[0] + test_code: files(source_root 
+ '/numpy/_core/src/_simd/checks/cpu_sve.c')[0] ) # TODO: Add support for MSVC ARM_FEATURES = { diff --git a/meson_cpu/loongarch64/meson.build b/meson_cpu/loongarch64/meson.build index 570e3bfcda01..d59b5682d646 100644 --- a/meson_cpu/loongarch64/meson.build +++ b/meson_cpu/loongarch64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') LSX = mod_features.new( 'LSX', 1, args: ['-mlsx'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_lsx.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_lsx.c')[0] ) LOONGARCH64_FEATURES = {'LSX': LSX} diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index 02bbe5f7618e..3cd5ffd7e16c 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -203,7 +203,7 @@ foreach opt_name, conf : parse_options warning('CPU Feature "@0@" is no longer explicitly supported, redirecting to "@1@".'.format(tok, ntok)) endif warning('Please check the latest documentation for build options.') - if ntok == '' or not append # redirected features not safe to be execluded + if ntok == '' or not append # redirected features not safe to be excluded continue endif tok = ntok diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index 57fe47140429..58690d1fa80a 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -3,19 +3,17 @@ mod_features = import('features') compiler_id = meson.get_compiler('c').get_id() VSX = mod_features.new( - 'VSX', 1, args: '-mvsx', - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx.c')[0], + 'VSX', 1, args: ['-mvsx', '-DHWY_COMPILE_ONLY_STATIC', '-DHWY_DISABLE_ATTR'] + + (compiler_id == 'clang' ? 
['-maltivec'] : []), + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { - 'VSX_ASM': files(source_root + '/numpy/distutils/checks/extra_vsx_asm.c')[0] + 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) -if compiler_id == 'clang' - VSX.update(args: ['-mvsx', '-maltivec']) -endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx2.c')[0], ) # VSX2 is hardware baseline feature on ppc64le since the first little-endian # support was part of Power8 @@ -25,17 +23,17 @@ endif VSX3 = mod_features.new( 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx3.c')[0], extra_tests: { - 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/distutils/checks/extra_vsx3_half_double.c')[0] + 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c')[0] } ) VSX4 = mod_features.new( 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX4', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx4.c')[0], extra_tests: { - 'VSX4_MMA': files(source_root + '/numpy/distutils/checks/extra_vsx4_mma.c')[0] + 'VSX4_MMA': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx4_mma.c')[0] } ) PPC64_FEATURES = {'VSX': VSX, 'VSX2': VSX2, 'VSX3': VSX3, 'VSX4': VSX4} diff --git a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build index 3f930f39e27e..fdab67d246d6 
100644 --- a/meson_cpu/riscv64/meson.build +++ b/meson_cpu/riscv64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') RVV = mod_features.new( 'RVV', 1, args: ['-march=rv64gcv'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_rvv.c')[0], ) RV64_FEATURES = {'RVV': RVV} diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index b7a420c27f0d..282ec056e78e 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -3,16 +3,16 @@ mod_features = import('features') VX = mod_features.new( 'VX', 1, args: ['-mzvector', '-march=arch11'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_vx.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vx.c')[0], ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index add073376d98..412803e5ddbb 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -43,14 +43,14 @@ AVX512_ICL = mod_features.new( group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], detect: 'AVX512_ICL', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] + test_code: files(source_root + 
'/numpy/_core/src/_simd/checks/cpu_avx512_icl.c')[0] ) AVX512_SPR = mod_features.new( 'AVX512_SPR', 35, implies: AVX512_ICL, args: ['-mavx512fp16', '-mavx512bf16'], group: ['AVX512FP16', 'AVX512BF16'], detect: 'AVX512_SPR', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index c71898626070..e05c20c57761 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -227,18 +227,30 @@ cdef extern from "numpy/arrayobject.h": pass ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type + @property + cdef inline PyTypeObject* typeobj(self) noexcept nogil: + return PyDataType_TYPEOBJ(self) + + @property + cdef inline char kind(self) noexcept nogil: + return PyDataType_KIND(self) + + @property + cdef inline char type(self) noexcept nogil: + return PyDataType_TYPE(self) + # Numpy sometimes mutates this without warning (e.g. it'll # sometimes change "|" to "<" in shared dtype objects on # little-endian machines). If this matters to you, use # PyArray_IsNativeByteOrder(dtype.byteorder) instead of # directly accessing this field. 
- cdef char byteorder - cdef int type_num + @property + cdef inline char byteorder(self) noexcept nogil: + return PyDataType_BYTEORDER(self) + + @property + cdef inline int type_num(self) noexcept nogil: + return PyDataType_TYPENUM(self) @property cdef inline npy_intp itemsize(self) noexcept nogil: @@ -428,6 +440,11 @@ cdef extern from "numpy/arrayobject.h": PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil PyObject* PyDataType_NAMES(dtype) nogil PyObject* PyDataType_FIELDS(dtype) nogil + char PyDataType_TYPE(dtype) nogil + char PyDataType_KIND(dtype) nogil + int PyDataType_TYPENUM(dtype) nogil + char PyDataType_BYTEORDER(dtype) nogil + PyTypeObject* PyDataType_TYPEOBJ(dtype) nogil bint PyDataType_ISBOOL(dtype) nogil bint PyDataType_ISUNSIGNED(dtype) nogil @@ -905,20 +922,69 @@ cdef extern from "numpy/ufuncobject.h": ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + ctypedef struct PyUFuncObject_fields: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + ctypedef struct PyUFuncObject: + pass + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops + @property + cdef inline int nin(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).nin + + @property + cdef inline int nout(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).nout + + @property + cdef inline int nargs(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).nargs + + @property + cdef inline PyUFuncGenericFunction* functions(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).functions + + @property + cdef inline void ** 
data(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).data + + @property + cdef inline int ntypes(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).ntypes + + @property + cdef inline const char* name(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).name + + @property + cdef inline const char* doc(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).doc + + @property + cdef inline void* ptr(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).ptr + + @property + cdef inline PyObject* obj(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).obj + + @property + cdef inline PyObject* userloops(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).userloops + + PyUFuncObject_fields *_PyUFuncObject_GET_ITEM_DATA(ufunc) nogil cdef enum: PyUFunc_Zero diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 40a24b6c7cc1..ddb904c1fd68 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -1,1155 +1,12 @@ # NumPy static imports for Cython < 3.0 # -# If any of the PyArray_* functions are called, import_array must be -# called first. +# DO NOT USE OR REFER TO THIS HEADER # -# Author: Dag Sverre Seljebotn +# This is provided only to generate an error message on older Cython +# versions. # - -DEF _buffer_format_string_len = 255 - -cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF -from cpython.mem cimport PyObject_Malloc, PyObject_Free -from cpython.object cimport PyObject, PyTypeObject -from cpython.buffer cimport PyObject_GetBuffer -from cpython.type cimport type -cimport libc.stdio as stdio - - -cdef extern from *: - # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. 
- # See https://github.com/cython/cython/issues/3573 - """ - /* Using NumPy API declarations from "numpy/__init__.pxd" */ - """ - - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - bint PyObject_TypeCheck(object obj, PyTypeObject* type) - -cdef extern from "numpy/arrayobject.h": - # It would be nice to use size_t and ssize_t, but ssize_t has special - # implicit conversion rules, so just use "long". - # Note: The actual type only matters for Cython promotion, so long - # is closer than int, but could lead to incorrect promotion. - # (Not to worrying, and always the status-quo.) - ctypedef signed long npy_intp - ctypedef unsigned long npy_uintp - - ctypedef unsigned char npy_bool - - ctypedef signed char npy_byte - ctypedef signed short npy_short - ctypedef signed int npy_int - ctypedef signed long npy_long - ctypedef signed long long npy_longlong - - ctypedef unsigned char npy_ubyte - ctypedef unsigned short npy_ushort - ctypedef unsigned int npy_uint - ctypedef unsigned long npy_ulong - ctypedef unsigned long long npy_ulonglong - - ctypedef float npy_float - ctypedef double npy_double - ctypedef long double npy_longdouble - - ctypedef signed char npy_int8 - ctypedef signed short npy_int16 - ctypedef signed int npy_int32 - ctypedef signed long long npy_int64 - - ctypedef unsigned char npy_uint8 - ctypedef unsigned short npy_uint16 - ctypedef unsigned int npy_uint32 - ctypedef unsigned long long npy_uint64 - - ctypedef float npy_float32 - ctypedef double npy_float64 - ctypedef long double npy_float80 - ctypedef long double npy_float96 - ctypedef long double npy_float128 - - ctypedef struct npy_cfloat: - pass - - ctypedef struct npy_cdouble: - pass - - ctypedef struct npy_clongdouble: - pass - - ctypedef struct npy_complex64: - pass - - ctypedef struct npy_complex128: - pass - - ctypedef struct npy_complex160: - pass - - ctypedef struct npy_complex192: - pass - - ctypedef struct npy_complex256: - pass - - ctypedef struct PyArray_Dims: - npy_intp *ptr - int 
len - - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VSTRING - NPY_VOID - NPY_DATETIME - NPY_TIMEDELTA - NPY_NTYPES_LEGACY - NPY_NOTYPE - - NPY_INT8 - NPY_INT16 - NPY_INT32 - NPY_INT64 - NPY_UINT8 - NPY_UINT16 - NPY_UINT32 - NPY_UINT64 - NPY_FLOAT16 - NPY_FLOAT32 - NPY_FLOAT64 - NPY_FLOAT80 - NPY_FLOAT96 - NPY_FLOAT128 - NPY_COMPLEX64 - NPY_COMPLEX128 - NPY_COMPLEX160 - NPY_COMPLEX192 - NPY_COMPLEX256 - - NPY_INTP - NPY_UINTP - NPY_DEFAULT_INT # Not a compile time constant (normally)! - - ctypedef enum NPY_ORDER: - NPY_ANYORDER - NPY_CORDER - NPY_FORTRANORDER - NPY_KEEPORDER - - ctypedef enum NPY_CASTING: - NPY_NO_CASTING - NPY_EQUIV_CASTING - NPY_SAFE_CASTING - NPY_SAME_KIND_CASTING - NPY_UNSAFE_CASTING - NPY_SAME_VALUE_CASTING - - ctypedef enum NPY_CLIPMODE: - NPY_CLIP - NPY_WRAP - NPY_RAISE - - ctypedef enum NPY_SCALARKIND: - NPY_NOSCALAR, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR - - ctypedef enum NPY_SORTKIND: - NPY_QUICKSORT - NPY_HEAPSORT - NPY_MERGESORT - - ctypedef enum NPY_SEARCHSIDE: - NPY_SEARCHLEFT - NPY_SEARCHRIGHT - - enum: - NPY_ARRAY_C_CONTIGUOUS - NPY_ARRAY_F_CONTIGUOUS - NPY_ARRAY_OWNDATA - NPY_ARRAY_FORCECAST - NPY_ARRAY_ENSURECOPY - NPY_ARRAY_ENSUREARRAY - NPY_ARRAY_ELEMENTSTRIDES - NPY_ARRAY_ALIGNED - NPY_ARRAY_NOTSWAPPED - NPY_ARRAY_WRITEABLE - NPY_ARRAY_WRITEBACKIFCOPY - - NPY_ARRAY_BEHAVED - NPY_ARRAY_BEHAVED_NS - NPY_ARRAY_CARRAY - NPY_ARRAY_CARRAY_RO - NPY_ARRAY_FARRAY - NPY_ARRAY_FARRAY_RO - NPY_ARRAY_DEFAULT - - NPY_ARRAY_IN_ARRAY - NPY_ARRAY_OUT_ARRAY - NPY_ARRAY_INOUT_ARRAY - NPY_ARRAY_IN_FARRAY - NPY_ARRAY_OUT_FARRAY - NPY_ARRAY_INOUT_FARRAY - - NPY_ARRAY_UPDATE_ALL - - cdef enum: - NPY_MAXDIMS # 64 on NumPy 2.x 
and 32 on NumPy 1.x - NPY_RAVEL_AXIS # Used for functions like PyArray_Mean - - ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) - - ctypedef struct PyArray_ArrayDescr: - # shape is a tuple, but Cython doesn't support "tuple shape" - # inside a non-PyObject declaration, so we have to declare it - # as just a PyObject*. - PyObject* shape - - ctypedef struct PyArray_Descr: - pass - - ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type - # Numpy sometimes mutates this without warning (e.g. it'll - # sometimes change "|" to "<" in shared dtype objects on - # little-endian machines). If this matters to you, use - # PyArray_IsNativeByteOrder(dtype.byteorder) instead of - # directly accessing this field. - cdef char byteorder - # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. - # cdef char flags - cdef int type_num - # itemsize/elsize, alignment, fields, names, and subarray must - # use the `PyDataType_*` accessor macros. With Cython 3 you can - # still use getter attributes `dtype.itemsize` - - ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: - # Use through macros - pass - - ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - cdef npy_intp *dimensions - cdef void **iters - - ctypedef struct PyArrayObject: - # For use in situations where ndarray can't replace PyArrayObject*, - # like PyArrayObject**. - pass - - ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: - cdef __cythonbufferdefaults__ = {"mode": "strided"} - - cdef: - # Only taking a few of the most commonly used and stable fields. - # One should use PyArray_* macros instead to access the C fields. 
- char *data - int ndim "nd" - npy_intp *shape "dimensions" - npy_intp *strides - dtype descr # deprecated since NumPy 1.7 ! - PyObject* base # NOT PUBLIC, DO NOT USE ! - - - int _import_array() except -1 - # A second definition so _import_array isn't marked as used when we use it here. - # Do not use - subject to change any time. - int __pyx_import_array "_import_array"() except -1 - - # - # Macros from ndarrayobject.h - # - bint PyArray_CHKFLAGS(ndarray m, int flags) nogil - bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil - bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil - bint PyArray_ISCONTIGUOUS(ndarray m) nogil - bint PyArray_ISWRITEABLE(ndarray m) nogil - bint PyArray_ISALIGNED(ndarray m) nogil - - int PyArray_NDIM(ndarray) nogil - bint PyArray_ISONESEGMENT(ndarray) nogil - bint PyArray_ISFORTRAN(ndarray) nogil - int PyArray_FORTRANIF(ndarray) nogil - - void* PyArray_DATA(ndarray) nogil - char* PyArray_BYTES(ndarray) nogil - - npy_intp* PyArray_DIMS(ndarray) nogil - npy_intp* PyArray_STRIDES(ndarray) nogil - npy_intp PyArray_DIM(ndarray, size_t) nogil - npy_intp PyArray_STRIDE(ndarray, size_t) nogil - - PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! - PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! - PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
- int PyArray_FLAGS(ndarray) nogil - void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 - void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 - npy_intp PyArray_ITEMSIZE(ndarray) nogil - int PyArray_TYPE(ndarray arr) nogil - - object PyArray_GETITEM(ndarray arr, void *itemptr) - int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 - - bint PyTypeNum_ISBOOL(int) nogil - bint PyTypeNum_ISUNSIGNED(int) nogil - bint PyTypeNum_ISSIGNED(int) nogil - bint PyTypeNum_ISINTEGER(int) nogil - bint PyTypeNum_ISFLOAT(int) nogil - bint PyTypeNum_ISNUMBER(int) nogil - bint PyTypeNum_ISSTRING(int) nogil - bint PyTypeNum_ISCOMPLEX(int) nogil - bint PyTypeNum_ISFLEXIBLE(int) nogil - bint PyTypeNum_ISUSERDEF(int) nogil - bint PyTypeNum_ISEXTENDED(int) nogil - bint PyTypeNum_ISOBJECT(int) nogil - - npy_intp PyDataType_ELSIZE(dtype) nogil - npy_intp PyDataType_ALIGNMENT(dtype) nogil - PyObject* PyDataType_METADATA(dtype) nogil - PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil - PyObject* PyDataType_NAMES(dtype) nogil - PyObject* PyDataType_FIELDS(dtype) nogil - - bint PyDataType_ISBOOL(dtype) nogil - bint PyDataType_ISUNSIGNED(dtype) nogil - bint PyDataType_ISSIGNED(dtype) nogil - bint PyDataType_ISINTEGER(dtype) nogil - bint PyDataType_ISFLOAT(dtype) nogil - bint PyDataType_ISNUMBER(dtype) nogil - bint PyDataType_ISSTRING(dtype) nogil - bint PyDataType_ISCOMPLEX(dtype) nogil - bint PyDataType_ISFLEXIBLE(dtype) nogil - bint PyDataType_ISUSERDEF(dtype) nogil - bint PyDataType_ISEXTENDED(dtype) nogil - bint PyDataType_ISOBJECT(dtype) nogil - bint PyDataType_HASFIELDS(dtype) nogil - bint PyDataType_HASSUBARRAY(dtype) nogil - npy_uint64 PyDataType_FLAGS(dtype) nogil - - bint PyArray_ISBOOL(ndarray) nogil - bint PyArray_ISUNSIGNED(ndarray) nogil - bint PyArray_ISSIGNED(ndarray) nogil - bint PyArray_ISINTEGER(ndarray) nogil - bint PyArray_ISFLOAT(ndarray) nogil - bint PyArray_ISNUMBER(ndarray) nogil - bint PyArray_ISSTRING(ndarray) 
nogil - bint PyArray_ISCOMPLEX(ndarray) nogil - bint PyArray_ISFLEXIBLE(ndarray) nogil - bint PyArray_ISUSERDEF(ndarray) nogil - bint PyArray_ISEXTENDED(ndarray) nogil - bint PyArray_ISOBJECT(ndarray) nogil - bint PyArray_HASFIELDS(ndarray) nogil - - bint PyArray_ISVARIABLE(ndarray) nogil - - bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil - bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder - bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder - bint PyArray_ISNOTSWAPPED(ndarray) nogil - bint PyArray_ISBYTESWAPPED(ndarray) nogil - - bint PyArray_FLAGSWAP(ndarray, int) nogil - - bint PyArray_ISCARRAY(ndarray) nogil - bint PyArray_ISCARRAY_RO(ndarray) nogil - bint PyArray_ISFARRAY(ndarray) nogil - bint PyArray_ISFARRAY_RO(ndarray) nogil - bint PyArray_ISBEHAVED(ndarray) nogil - bint PyArray_ISBEHAVED_RO(ndarray) nogil - - - bint PyDataType_ISNOTSWAPPED(dtype) nogil - bint PyDataType_ISBYTESWAPPED(dtype) nogil - - bint PyArray_DescrCheck(object) - - bint PyArray_Check(object) - bint PyArray_CheckExact(object) - - # Cannot be supported due to out arg: - # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) - # bint PyArray_HasArrayInterface(op, out) - - - bint PyArray_IsZeroDim(object) - # Cannot be supported due to ## ## in macro: - # bint PyArray_IsScalar(object, verbatim work) - bint PyArray_CheckScalar(object) - bint PyArray_IsPythonNumber(object) - bint PyArray_IsPythonScalar(object) - bint PyArray_IsAnyScalar(object) - bint PyArray_CheckAnyScalar(object) - - ndarray PyArray_GETCONTIGUOUS(ndarray) - bint PyArray_SAMESHAPE(ndarray, ndarray) nogil - npy_intp PyArray_SIZE(ndarray) nogil - npy_intp PyArray_NBYTES(ndarray) nogil - - object PyArray_FROM_O(object) - object PyArray_FROM_OF(object m, int flags) - object PyArray_FROM_OT(object m, int type) - object PyArray_FROM_OTF(object m, int type, int flags) - object PyArray_FROMANY(object m, int type, int min, int max, int flags) - object PyArray_ZEROS(int nd, npy_intp* dims, 
int type, int fortran) - object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) - void PyArray_FILLWBYTE(ndarray, int val) - object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) - unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) - bint PyArray_EquivByteorders(int b1, int b2) nogil - object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) - #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) - object PyArray_ToScalar(void* data, ndarray arr) - - void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil - void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil - void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil - void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil - - # Cannot be supported due to out arg - # void PyArray_DESCR_REPLACE(descr) - - - object PyArray_Copy(ndarray) - object PyArray_FromObject(object op, int type, int min_depth, int max_depth) - object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) - object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) - - object PyArray_Cast(ndarray mp, int type_num) - object PyArray_Take(ndarray ap, object items, int axis) - object PyArray_Put(ndarray ap, object items, object values) - - void PyArray_ITER_RESET(flatiter it) nogil - void PyArray_ITER_NEXT(flatiter it) nogil - void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil - void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil - void* PyArray_ITER_DATA(flatiter it) nogil - bint PyArray_ITER_NOTDONE(flatiter it) nogil - - void PyArray_MultiIter_RESET(broadcast multi) nogil - void PyArray_MultiIter_NEXT(broadcast multi) nogil - void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil - void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil - void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil - void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil - bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil - npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil - int PyArray_MultiIter_NDIM(broadcast multi) nogil - npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil - int PyArray_MultiIter_NUMITER(broadcast multi) nogil - npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil - void** PyArray_MultiIter_ITERS(broadcast multi) nogil - - # Functions from __multiarray_api.h - - # Functions taking dtype and returning object/ndarray are disabled - # for now as they steal dtype references. I'm conservative and disable - # more than is probably needed until it can be checked further. - int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... - int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... - dtype PyArray_DescrFromType (int) - object PyArray_TypeObjectFromType (int) - char * PyArray_Zero (ndarray) - char * PyArray_One (ndarray) - #object PyArray_CastToType (ndarray, dtype, int) - int PyArray_CanCastSafely (int, int) # writes errors - npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors - int PyArray_ObjectType (object, int) except 0 - dtype PyArray_DescrFromObject (object, dtype) - #ndarray* PyArray_ConvertToCommonType (object, int *) - dtype PyArray_DescrFromScalar (object) - dtype PyArray_DescrFromTypeObject (object) - npy_intp PyArray_Size (object) - #object PyArray_Scalar (void *, dtype, object) - #object PyArray_FromScalar (object, dtype) - void PyArray_ScalarAsCtype (object, void *) - #int PyArray_CastScalarToCtype (object, void *, dtype) - #int PyArray_CastScalarDirect (object, dtype, void *, int) - #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) - #object PyArray_FromAny (object, dtype, int, int, int, object) - object PyArray_EnsureArray (object) - object PyArray_EnsureAnyArray (object) - #object PyArray_FromFile (stdio.FILE *, dtype, 
npy_intp, char *) - #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) - #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) - #object PyArray_FromIter (object, dtype, npy_intp) - object PyArray_Return (ndarray) - #object PyArray_GetField (ndarray, dtype, int) - #int PyArray_SetField (ndarray, dtype, int, object) except -1 - object PyArray_Byteswap (ndarray, npy_bool) - object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) - int PyArray_CopyInto (ndarray, ndarray) except -1 - int PyArray_CopyAnyInto (ndarray, ndarray) except -1 - int PyArray_CopyObject (ndarray, object) except -1 - object PyArray_NewCopy (ndarray, NPY_ORDER) - object PyArray_ToList (ndarray) - object PyArray_ToString (ndarray, NPY_ORDER) - int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 - int PyArray_Dump (object, object, int) except -1 - object PyArray_Dumps (object, int) - int PyArray_ValidType (int) # Cannot error - void PyArray_UpdateFlags (ndarray, int) - object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) - #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) - #dtype PyArray_DescrNew (dtype) - dtype PyArray_DescrNewFromType (int) - double PyArray_GetPriority (object, double) # clears errors as of 1.25 - object PyArray_IterNew (object) - object PyArray_MultiIterNew (int, ...) - - int PyArray_PyIntAsInt (object) except? 
-1 - npy_intp PyArray_PyIntAsIntp (object) - int PyArray_Broadcast (broadcast) except -1 - int PyArray_FillWithScalar (ndarray, object) except -1 - npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) - dtype PyArray_DescrNewByteorder (dtype, char) - object PyArray_IterAllButAxis (object, int *) - #object PyArray_CheckFromAny (object, dtype, int, int, int, object) - #object PyArray_FromArray (ndarray, dtype, int) - object PyArray_FromInterface (object) - object PyArray_FromStructInterface (object) - #object PyArray_FromArrayAttr (object, dtype, object) - #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) - int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) - npy_bool PyArray_CanCastScalar (type, type) - int PyArray_RemoveSmallest (broadcast) except -1 - int PyArray_ElementStrides (object) - void PyArray_Item_INCREF (char *, dtype) except * - void PyArray_Item_XDECREF (char *, dtype) except * - object PyArray_Transpose (ndarray, PyArray_Dims *) - object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) - object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) - object PyArray_PutMask (ndarray, object, object) - object PyArray_Repeat (ndarray, object, int) - object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) - int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 - object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) - object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) - object PyArray_ArgMax (ndarray, int, ndarray) - object PyArray_ArgMin (ndarray, int, ndarray) - object PyArray_Reshape (ndarray, object) - object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) - object PyArray_Squeeze (ndarray) - #object PyArray_View (ndarray, dtype, type) - object PyArray_SwapAxes (ndarray, int, int) - object PyArray_Max (ndarray, int, ndarray) - object PyArray_Min (ndarray, int, ndarray) - object PyArray_Ptp (ndarray, int, ndarray) - object PyArray_Mean (ndarray, int, int, ndarray) - 
object PyArray_Trace (ndarray, int, int, int, int, ndarray) - object PyArray_Diagonal (ndarray, int, int, int) - object PyArray_Clip (ndarray, object, object, ndarray) - object PyArray_Conjugate (ndarray, ndarray) - object PyArray_Nonzero (ndarray) - object PyArray_Std (ndarray, int, int, ndarray, int) - object PyArray_Sum (ndarray, int, int, ndarray) - object PyArray_CumSum (ndarray, int, int, ndarray) - object PyArray_Prod (ndarray, int, int, ndarray) - object PyArray_CumProd (ndarray, int, int, ndarray) - object PyArray_All (ndarray, int, ndarray) - object PyArray_Any (ndarray, int, ndarray) - object PyArray_Compress (ndarray, object, int, ndarray) - object PyArray_Flatten (ndarray, NPY_ORDER) - object PyArray_Ravel (ndarray, NPY_ORDER) - npy_intp PyArray_MultiplyList (npy_intp *, int) - int PyArray_MultiplyIntList (int *, int) - void * PyArray_GetPtr (ndarray, npy_intp*) - int PyArray_CompareLists (npy_intp *, npy_intp *, int) - #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) - int PyArray_Free (object, void *) - #int PyArray_Converter (object, object*) - int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 - object PyArray_Concatenate (object, int) - object PyArray_InnerProduct (object, object) - object PyArray_MatrixProduct (object, object) - object PyArray_Correlate (object, object, int) - #int PyArray_DescrConverter (object, dtype*) except 0 - #int PyArray_DescrConverter2 (object, dtype*) except 0 - int PyArray_IntpConverter (object, PyArray_Dims *) except 0 - #int PyArray_BufferConverter (object, chunk) except 0 - int PyArray_AxisConverter (object, int *) except 0 - int PyArray_BoolConverter (object, npy_bool *) except 0 - int PyArray_ByteorderConverter (object, char *) except 0 - int PyArray_OrderConverter (object, NPY_ORDER *) except 0 - unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors - #object PyArray_Zeros (int, npy_intp *, dtype, int) - #object PyArray_Empty (int, npy_intp *, dtype, int) - object 
PyArray_Where (object, object, object) - object PyArray_Arange (double, double, double, int) - #object PyArray_ArangeObj (object, object, object, dtype) - int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 - object PyArray_LexSort (object, int) - object PyArray_Round (ndarray, int, ndarray) - unsigned char PyArray_EquivTypenums (int, int) - int PyArray_RegisterDataType (dtype) except -1 - int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 - int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 - #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) - object PyArray_IntTupleFromIntp (int, npy_intp *) - int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 - #int PyArray_OutputConverter (object, ndarray*) except 0 - object PyArray_BroadcastToShape (object, npy_intp *, int) - #int PyArray_DescrAlignConverter (object, dtype*) except 0 - #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 - int PyArray_SearchsideConverter (object, void *) except 0 - object PyArray_CheckAxis (ndarray, int *, int) - npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) - int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. - - # The memory handler functions require the NumPy 1.22 API - # and may require defining NPY_TARGET_VERSION - ctypedef struct PyDataMemAllocator: - void *ctx - void* (*malloc) (void *ctx, size_t size) - void* (*calloc) (void *ctx, size_t nelem, size_t elsize) - void* (*realloc) (void *ctx, void *ptr, size_t new_size) - void (*free) (void *ctx, void *ptr, size_t size) - - ctypedef struct PyDataMem_Handler: - char* name - npy_uint8 version - PyDataMemAllocator allocator - - object PyDataMem_SetHandler(object handler) - object PyDataMem_GetHandler() - - # additional datetime related functions are defined below - - -# Typedefs that matches the runtime dtype objects in -# the numpy module. 
- -# The ones that are commented out needs an IFDEF function -# in Cython to enable them only on the right systems. - -ctypedef npy_int8 int8_t -ctypedef npy_int16 int16_t -ctypedef npy_int32 int32_t -ctypedef npy_int64 int64_t - -ctypedef npy_uint8 uint8_t -ctypedef npy_uint16 uint16_t -ctypedef npy_uint32 uint32_t -ctypedef npy_uint64 uint64_t - -ctypedef npy_float32 float32_t -ctypedef npy_float64 float64_t -#ctypedef npy_float80 float80_t -#ctypedef npy_float128 float128_t - -ctypedef float complex complex64_t -ctypedef double complex complex128_t - -ctypedef npy_longlong longlong_t -ctypedef npy_ulonglong ulonglong_t - -ctypedef npy_intp intp_t -ctypedef npy_uintp uintp_t - -ctypedef npy_double float_t -ctypedef npy_double double_t -ctypedef npy_longdouble longdouble_t - -ctypedef float complex cfloat_t -ctypedef double complex cdouble_t -ctypedef double complex complex_t -ctypedef long double complex clongdouble_t - -cdef inline object PyArray_MultiIterNew1(a): - return PyArray_MultiIterNew(1, a) - -cdef inline object PyArray_MultiIterNew2(a, b): - return PyArray_MultiIterNew(2, a, b) - -cdef inline object PyArray_MultiIterNew3(a, b, c): - return PyArray_MultiIterNew(3, a, b, c) - -cdef inline object PyArray_MultiIterNew4(a, b, c, d): - return PyArray_MultiIterNew(4, a, b, c, d) - -cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - return PyArray_MultiIterNew(5, a, b, c, d, e) - -cdef inline tuple PyDataType_SHAPE(dtype d): - if PyDataType_HASSUBARRAY(d): - return d.subarray.shape - else: - return () - - -cdef extern from "numpy/ndarrayobject.h": - PyTypeObject PyTimedeltaArrType_Type - PyTypeObject PyDatetimeArrType_Type - ctypedef int64_t npy_timedelta - ctypedef int64_t npy_datetime - -cdef extern from "numpy/ndarraytypes.h": - ctypedef struct PyArray_DatetimeMetaData: - NPY_DATETIMEUNIT base - int64_t num - - ctypedef struct npy_datetimestruct: - int64_t year - int32_t month, day, hour, min, sec, us, ps, as - - # Iterator API added in v1.6 - # - # 
These don't match the definition in the C API because Cython can't wrap - # function pointers that return functions. - # https://github.com/cython/cython/issues/6720 - ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil - ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil - -cdef extern from "numpy/arrayscalars.h": - - # abstract types - ctypedef class numpy.generic [object PyObject]: - pass - ctypedef class numpy.number [object PyObject]: - pass - ctypedef class numpy.integer [object PyObject]: - pass - ctypedef class numpy.signedinteger [object PyObject]: - pass - ctypedef class numpy.unsignedinteger [object PyObject]: - pass - ctypedef class numpy.inexact [object PyObject]: - pass - ctypedef class numpy.floating [object PyObject]: - pass - ctypedef class numpy.complexfloating [object PyObject]: - pass - ctypedef class numpy.flexible [object PyObject]: - pass - ctypedef class numpy.character [object PyObject]: - pass - - ctypedef struct PyDatetimeScalarObject: - # PyObject_HEAD - npy_datetime obval - PyArray_DatetimeMetaData obmeta - - ctypedef struct PyTimedeltaScalarObject: - # PyObject_HEAD - npy_timedelta obval - PyArray_DatetimeMetaData obmeta - - ctypedef enum NPY_DATETIMEUNIT: - NPY_FR_Y - NPY_FR_M - NPY_FR_W - NPY_FR_D - NPY_FR_B - NPY_FR_h - NPY_FR_m - NPY_FR_s - NPY_FR_ms - NPY_FR_us - NPY_FR_ns - NPY_FR_ps - NPY_FR_fs - NPY_FR_as - NPY_FR_GENERIC - - -cdef extern from "numpy/arrayobject.h": - # These are part of the C-API defined in `__multiarray_api.h` - - # NumPy internal definitions in datetime_strings.c: - int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( - int local, NPY_DATETIMEUNIT base) - int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( - npy_datetimestruct *dts, char *outstr, npy_intp outlen, - int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, - NPY_CASTING casting) except -1 - - # NumPy 
internal definition in datetime.c: - # May return 1 to indicate that object does not appear to be a datetime - # (returns 0 on success). - int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( - PyObject *obj, npy_datetimestruct *out, - NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 - int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( - PyArray_DatetimeMetaData *meta, npy_datetime dt, - npy_datetimestruct *out) except -1 - int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( - PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, - npy_datetime *out) except -1 - - -# -# ufunc API +# See __init__.cython-30.pxd for the real Cython header # -cdef extern from "numpy/ufuncobject.h": - - ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) - - ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops - - cdef enum: - PyUFunc_Zero - PyUFunc_One - PyUFunc_None - # deprecated - UFUNC_FPE_DIVIDEBYZERO - UFUNC_FPE_OVERFLOW - UFUNC_FPE_UNDERFLOW - UFUNC_FPE_INVALID - # use these instead - NPY_FPE_DIVIDEBYZERO - NPY_FPE_OVERFLOW - NPY_FPE_UNDERFLOW - NPY_FPE_INVALID - - object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, - void **, char *, int, int, int, int, char *, char *, int) - int PyUFunc_RegisterLoopForType(ufunc, int, - PyUFuncGenericFunction, int *, void *) except -1 - void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F_As_D_D \ - (char **, npy_intp 
*, npy_intp *, void *) - void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_clearfperr() - int PyUFunc_getfperr() - int PyUFunc_ReplaceLoopBySignature \ - (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) - object PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, - int, char *, char *, int, char *) - - int _import_umath() except -1 - -cdef inline void set_array_base(ndarray arr, object base): - Py_INCREF(base) # important to do this before stealing the reference below! - PyArray_SetBaseObject(arr, base) - -cdef inline object get_array_base(ndarray arr): - base = PyArray_BASE(arr) - if base is NULL: - return None - return base - -# Versions of the import_* functions which are more suitable for -# Cython code. 
-cdef inline int import_array() except -1: - try: - __pyx_import_array() - except Exception: - raise ImportError("numpy._core.multiarray failed to import") - -cdef inline int import_umath() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy._core.umath failed to import") - -cdef inline int import_ufunc() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy._core.umath failed to import") - - -cdef inline bint is_timedelta64_object(object obj): - """ - Cython equivalent of `isinstance(obj, np.timedelta64)` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) - - -cdef inline bint is_datetime64_object(object obj): - """ - Cython equivalent of `isinstance(obj, np.datetime64)` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) - - -cdef inline npy_datetime get_datetime64_value(object obj) nogil: - """ - returns the int64 value underlying scalar numpy datetime64 object - - Note that to interpret this as a datetime, the corresponding unit is - also needed. That can be found using `get_datetime64_unit`. - """ - return (obj).obval - - -cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: - """ - returns the int64 value underlying scalar numpy timedelta64 object - """ - return (obj).obval - - -cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: - """ - returns the unit part of the dtype for a numpy datetime64 object. 
- """ - return (obj).obmeta.base - - -cdef extern from "numpy/arrayobject.h": - - ctypedef struct NpyIter: - pass - - cdef enum: - NPY_FAIL - NPY_SUCCEED - - cdef enum: - # Track an index representing C order - NPY_ITER_C_INDEX - # Track an index representing Fortran order - NPY_ITER_F_INDEX - # Track a multi-index - NPY_ITER_MULTI_INDEX - # User code external to the iterator does the 1-dimensional innermost loop - NPY_ITER_EXTERNAL_LOOP - # Convert all the operands to a common data type - NPY_ITER_COMMON_DTYPE - # Operands may hold references, requiring API access during iteration - NPY_ITER_REFS_OK - # Zero-sized operands should be permitted, iteration checks IterSize for 0 - NPY_ITER_ZEROSIZE_OK - # Permits reductions (size-0 stride with dimension size > 1) - NPY_ITER_REDUCE_OK - # Enables sub-range iteration - NPY_ITER_RANGED - # Enables buffering - NPY_ITER_BUFFERED - # When buffering is enabled, grows the inner loop if possible - NPY_ITER_GROWINNER - # Delay allocation of buffers until first Reset* call - NPY_ITER_DELAY_BUFALLOC - # When NPY_KEEPORDER is specified, disable reversing negative-stride axes - NPY_ITER_DONT_NEGATE_STRIDES - NPY_ITER_COPY_IF_OVERLAP - # The operand will be read from and written to - NPY_ITER_READWRITE - # The operand will only be read from - NPY_ITER_READONLY - # The operand will only be written to - NPY_ITER_WRITEONLY - # The operand's data must be in native byte order - NPY_ITER_NBO - # The operand's data must be aligned - NPY_ITER_ALIGNED - # The operand's data must be contiguous (within the inner loop) - NPY_ITER_CONTIG - # The operand may be copied to satisfy requirements - NPY_ITER_COPY - # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements - NPY_ITER_UPDATEIFCOPY - # Allocate the operand if it is NULL - NPY_ITER_ALLOCATE - # If an operand is allocated, don't use any subtype - NPY_ITER_NO_SUBTYPE - # This is a virtual array slot, operand is NULL but temporary data is there - NPY_ITER_VIRTUAL - # Require 
that the dimension match the iterator dimensions exactly - NPY_ITER_NO_BROADCAST - # A mask is being used on this array, affects buffer -> array copy - NPY_ITER_WRITEMASKED - # This array is the mask for all WRITEMASKED operands - NPY_ITER_ARRAYMASK - # Assume iterator order data access for COPY_IF_OVERLAP - NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE - - # construction and destruction functions - NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, - NPY_CASTING casting, dtype datatype) except NULL - NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, - NPY_ORDER order, NPY_CASTING casting, npy_uint32* - op_flags, PyArray_Descr** op_dtypes) except NULL - NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, - npy_uint32 flags, NPY_ORDER order, - NPY_CASTING casting, npy_uint32* op_flags, - PyArray_Descr** op_dtypes, int oa_ndim, - int** op_axes, const npy_intp* itershape, - npy_intp buffersize) except NULL - NpyIter* NpyIter_Copy(NpyIter* it) except NULL - int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL - int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL - int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL - int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL - int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL - int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, - npy_intp iend, char** errmsg) except NPY_FAIL - int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL - int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL - int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL - npy_intp NpyIter_GetIterSize(NpyIter* it) nogil - npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil - void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, - npy_intp* iend) nogil - int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL - npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) 
nogil - npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil - npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil - npy_bool NpyIter_HasIndex(NpyIter* it) nogil - npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil - npy_bool NpyIter_IsBuffered(NpyIter* it) nogil - npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil - npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil - int NpyIter_GetNDim(NpyIter* it) nogil - int NpyIter_GetNOp(NpyIter* it) nogil - npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL - int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil - PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) - PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) - ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) - void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) - void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) - int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, - npy_intp* outstrides) except NPY_FAIL - npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil - # functions for iterating an NpyIter object - # - # These don't match the definition in the C API because Cython can't wrap - # function pointers that return functions. 
- NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL - NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, - char** errmsg) except NULL - char** NpyIter_GetDataPtrArray(NpyIter* it) nogil - char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil - npy_intp* NpyIter_GetIndexPtr(NpyIter* it) - npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil - npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil - void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil - npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil - void NpyIter_DebugPrint(NpyIter* it) - -# NpyString API -cdef extern from "numpy/ndarraytypes.h": - ctypedef struct npy_string_allocator: - pass - - ctypedef struct npy_packed_static_string: - pass - - ctypedef struct npy_static_string: - size_t size - const char *buf - - ctypedef struct PyArray_StringDTypeObject: - PyArray_Descr base - PyObject *na_object - char coerce - char has_nan_na - char has_string_na - char array_owned - npy_static_string default_string - npy_static_string na_name - npy_string_allocator *allocator - -cdef extern from "numpy/arrayobject.h": - npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) - void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) - void NpyString_release_allocator(npy_string_allocator *allocator) - void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) - int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) - int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) - int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) +# intentionally created compiler error that only triggers on Cython < 3.0.0 +DEF err = int('Build 
aborted: the NumPy Cython headers require Cython 3.0.0 or newer.') diff --git a/numpy/__init__.py b/numpy/__init__.py index ef7c1ed7678a..ce0452ffe8d0 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -52,9 +52,6 @@ Polynomial tools testing NumPy testing tools -distutils - Enhancements to distutils with support for - Fortran compilers support and more (for Python <= 3.11) Utilities --------- @@ -578,7 +575,6 @@ hsplit, kron, put_along_axis, - row_stack, split, take_along_axis, tile, @@ -624,8 +620,8 @@ from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from - # __getattr__. Note that `distutils` (deprecated) and `array_api` - # (experimental label) are not added here, because `from numpy import *` + # __getattr__. Note that `array_api` + # (experimental label) is not added here, because `from numpy import *` # must not raise any warnings - that's too disruptive. __numpy_submodules__ = { "linalg", "fft", "dtypes", "random", "polynomial", "ma", @@ -671,7 +667,7 @@ # import with `from numpy import *`. 
__future_scalars__ = {"str", "bytes", "object"} - __array_api_version__ = "2024.12" + __array_api_version__ = "2025.12" from ._array_api_info import __array_namespace_info__ @@ -747,23 +743,12 @@ def __getattr__(attr): elif attr == "char": import numpy.char as char return char - elif attr == "array_api": - raise AttributeError("`numpy.array_api` is not available from " - "numpy 2.0 onwards", name=None) elif attr == "core": import numpy.core as core return core elif attr == "strings": import numpy.strings as strings return strings - elif attr == "distutils": - if 'distutils' in __numpy_submodules__: - import numpy.distutils as distutils - return distutils - else: - raise AttributeError("`numpy.distutils` is not available from " - "Python 3.12 onwards", name=None) - if attr in __future_scalars__: # And future warnings for those that will change, but also give # the AttributeError @@ -781,14 +766,6 @@ def __getattr__(attr): name=None ) - if attr == "chararray": - warnings.warn( - "`np.chararray` is deprecated and will be removed from " - "the main namespace in the future. 
Use an array with a string " - "or bytes dtype instead.", DeprecationWarning, stacklevel=2) - import numpy.char as char - return char.chararray - raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") def __dir__(): @@ -797,7 +774,7 @@ def __dir__(): ) public_symbols -= { "matrixlib", "matlib", "tests", "conftest", "version", - "distutils", "array_api" + "array_api" } return list(public_symbols) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e19b130d73b6..4875e64dccc1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,18 +1,16 @@ # ruff: noqa: I001 -import builtins -import sys -import mmap import ctypes as ct -import array as _array import datetime as dt import inspect +import sys from abc import abstractmethod -from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import bool as py_bool, str as py_str, type as py_type from decimal import Decimal from fractions import Fraction +from types import EllipsisType, ModuleType, MappingProxyType, GenericAlias from uuid import UUID -import numpy as np from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes @@ -21,7 +19,6 @@ from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, - _SupportsArray, _NestedSequence, _ArrayLike, _ArrayLikeBool_co, @@ -90,26 +87,16 @@ from numpy._typing import ( # type: ignore[deprecated] _Float64Codes, _Complex64Codes, _Complex128Codes, - _ByteCodes, - _ShortCodes, _IntCCodes, _IntPCodes, _LongCodes, _LongLongCodes, - _UByteCodes, - _UShortCodes, _UIntCCodes, _UIntPCodes, _ULongCodes, _ULongLongCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, _LongDoubleCodes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, - _DT64Codes, _TD64Codes, _StrCodes, _BytesCodes, @@ -130,6 +117,15 @@ from 
numpy._typing import ( # type: ignore[deprecated] _UFunc_Nin2_Nout2, _GUFunc_Nin2_Nout1, ) +from numpy._typing._char_codes import ( + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, + _TD64Codes_any, + _TD64Codes_int, + _TD64Codes_timedelta, +) # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( @@ -142,6 +138,7 @@ from numpy._typing._extended_precision import ( from numpy._array_api_info import __array_namespace_info__ from collections.abc import ( + Buffer, Callable, Iterable, Iterator, @@ -149,19 +146,6 @@ from collections.abc import ( Sequence, ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _SupportsBuffer -else: - _SupportsBuffer: TypeAlias = ( - bytes - | bytearray - | memoryview - | _array.array[Any] - | mmap.mmap - | NDArray[Any] - | generic - ) - from typing import ( Any, ClassVar, @@ -177,10 +161,10 @@ from typing import ( SupportsFloat, SupportsInt, SupportsIndex, - TypeAlias, TypedDict, final, overload, + override, type_check_only, ) @@ -189,7 +173,7 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar, deprecated, override +from typing_extensions import CapsuleType, TypeVar, deprecated from numpy import ( char, @@ -217,8 +201,6 @@ from numpy import ( matrixlib as matrixlib, version as version, ) -if sys.version_info < (3, 12): - from numpy import distutils as distutils from numpy._core.records import ( record, @@ -315,7 +297,7 @@ from numpy._core.getlimits import ( finfo, iinfo, ) - +from numpy._core.memmap import memmap from numpy._core.multiarray import ( array, empty_like, @@ -342,6 +324,7 @@ from numpy._core.multiarray import ( ascontiguousarray, asfortranarray, 
arange, + busdaycalendar, busday_count, busday_offset, datetime_as_string, @@ -353,6 +336,8 @@ from numpy._core.multiarray import ( promote_types, fromstring, frompyfunc, + flatiter, + nditer, nested_iters, flagsobj, ) @@ -432,7 +417,7 @@ from numpy.lib._arraysetops_impl import ( unique_values, ) -from numpy.lib._function_base_impl import ( # type: ignore[deprecated] +from numpy.lib._function_base_impl import ( select, piecewise, trim_zeros, @@ -532,12 +517,12 @@ from numpy.lib._polynomial_impl import ( polymul, polydiv, polyval, + poly1d, polyfit, ) -from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] +from numpy.lib._shape_base_impl import ( column_stack, - row_stack, dstack, array_split, split, @@ -609,7 +594,7 @@ from numpy.matrixlib import ( matrix, ) -__all__ = [ # noqa: RUF022 +__all__ = [ # __numpy_submodules__ "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", @@ -696,7 +681,7 @@ __all__ = [ # noqa: RUF022 # lib._shape_base_impl.__all__ "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", - "take_along_axis", "put_along_axis", "row_stack", + "take_along_axis", "put_along_axis", # lib._type_check_impl.__all__ "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", "typename", "mintypecode", "common_type", @@ -726,156 +711,96 @@ __all__ = [ # noqa: RUF022 "emath", "show_config", "__version__", "__array_namespace_info__", ] # fmt: skip -### Constrained types (for internal use only) -# Only use these for functions; never as generic type parameter. 
- -_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], # 0-d - tuple[int], # 1-d - tuple[int, int], # 2-d - tuple[int, int, int], # 3-d - tuple[int, int, int, int], # 4-d - tuple[int, int, int, int, int], # 5-d - tuple[int, int, int, int, int, int], # 6-d - tuple[int, int, int, int, int, int, int], # 7-d - tuple[int, int, int, int, int, int, int, int], # 8-d - tuple[int, ...], # N-d -) -_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) -_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) -_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) -_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) -_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) - -### Type parameters (for internal use only) +### Type parameters (with defaults); for internal use only -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) -_RealT_co = TypeVar("_RealT_co", covariant=True) -_ImagT_co = TypeVar("_ImagT_co", covariant=True) - -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) - -_ArrayT = TypeVar("_ArrayT", bound=ndarray) _ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) -_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) -_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) -_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) -_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) -_InexactArrayT = TypeVar("_InexactArrayT", 
bound=NDArray[inexact]) -_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) -_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) -_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) -_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) -_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) -_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) -_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) -_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) -_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) -_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) - -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) -_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... 
- -_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_InexactT = TypeVar("_InexactT", bound=inexact) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_IntegerT = TypeVar("_IntegerT", bound=integer) -_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) -_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] +# intentionally invariant +_NBitT = TypeVar("_NBitT", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) -_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) -_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) -_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) -_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) -_FlexibleItemT_co = TypeVar( - "_FlexibleItemT_co", - bound=_CharLike_co | tuple[Any, ...], - default=_CharLike_co | tuple[Any, ...], - covariant=True, -) -_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) -_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) -_DT64ItemT_co = TypeVar("_DT64ItemT_co", 
bound=dt.date | int | None, default=dt.date | int | None, covariant=True) -_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=py_bool, default=py_bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=Any, covariant=True) # either int, float, or complex +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=Any, covariant=True) # either float or complex +_FlexibleItemT_co = TypeVar("_FlexibleItemT_co", bound=bytes | str | tuple[Any, ...], default=Any, covariant=True) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=bytes | str, default=Any, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=_TD64Item, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant=True) ### Type Aliases (for internal use only) -_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] -_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] +type _Falsy = L[False, 0] | bool_[L[False]] +type _Truthy = L[True, 1] | bool_[L[True]] + +type _1D = tuple[int] +type _2D = tuple[int, int] +type _3D = tuple[int, int, int] + +type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_2Tuple: TypeAlias = tuple[_T, _T] +type _ArrayUInt_co = NDArray[unsignedinteger | bool_] +type _ArrayInt_co = NDArray[integer | bool_] +type _ArrayFloat64_co = NDArray[floating[_64Bit] | float32 | float16 | integer | bool_] +type _ArrayFloat_co = NDArray[floating | integer | bool_] +type _ArrayComplex128_co = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | bool_] +type _ArrayComplex_co = NDArray[inexact | integer | bool_] +type _ArrayNumber_co = NDArray[number | bool_] +type _ArrayTD64_co = NDArray[timedelta64 | integer | bool_] -_ArrayUInt_co: TypeAlias = 
NDArray[unsignedinteger | np.bool] -_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] -_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] -_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] -_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] -_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] -_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] +type _ArrayString = ndarray[_AnyShape, dtype[str_] | dtypes.StringDType] +type _ArrayNumeric = NDArray[number | timedelta64 | object_] -_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool -_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool -_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co +type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 -_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_ +type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | py_bool | bool_ +type _Complex128_co = complex | number[_64Bit] | _Complex64_co -_UnsignedIntegerCType: TypeAlias = type[ +type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] 
+ +type _UnsignedIntegerCType = type[ ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong | ct.c_size_t | ct.c_void_p ] # fmt: skip -_SignedIntegerCType: TypeAlias = type[ +type _SignedIntegerCType = type[ ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong | ct.c_ssize_t ] # fmt: skip -_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] -_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType +type _FloatingCType = type[ct.c_float | ct.c_double | ct.c_longdouble] +type _IntegerCType = _UnsignedIntegerCType | _SignedIntegerCType # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor # NOTE: `builtins.object` should not be included here -_BuiltinObjectLike: TypeAlias = ( +type _BuiltinObjectLike = ( slice | Decimal | Fraction | UUID | dt.date | dt.time | dt.timedelta | dt.tzinfo | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] ) # fmt: skip # Introduce an alias for `dtype` to avoid naming conflicts. 
-_dtype: TypeAlias = dtype[_ScalarT] +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` +_dtype = dtype -_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +type _ByteOrderChar = L["<", ">", "=", "|"] # can be anything, is case-insensitive, and only the first character matters -_ByteOrder: TypeAlias = L[ +type _ByteOrder = L[ "S", # swap the current order (default) "<", "L", "little", # little-endian ">", "B", "big", # big endian "=", "N", "native", # native order "|", "I", # ignore ] # fmt: skip -_DTypeKind: TypeAlias = L[ +type _DTypeKind = L[ "b", # boolean "i", # signed integer "u", # unsigned integer @@ -889,7 +814,7 @@ _DTypeKind: TypeAlias = L[ "V", # void "T", # unicode-string (variable-width) ] -_DTypeChar: TypeAlias = L[ +type _DTypeChar = L[ "?", # bool "b", # byte "B", # ubyte @@ -910,7 +835,6 @@ _DTypeChar: TypeAlias = L[ "G", # clongdouble "O", # object "S", # bytes_ (S0) - "a", # bytes_ (deprecated) "U", # str_ "V", # void "M", # datetime64 @@ -918,7 +842,7 @@ _DTypeChar: TypeAlias = L[ "c", # bytes_ (S1) "T", # StringDType ] -_DTypeNum: TypeAlias = L[ +type _DTypeNum = L[ 0, # bool 1, # byte 2, # ubyte @@ -947,35 +871,35 @@ _DTypeNum: TypeAlias = L[ 256, # user-defined 2056, # StringDType ] -_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] +type _DTypeBuiltinKind = L[0, 1, 2] -_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] +type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12", "2025.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] +type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] -_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None -_OrderACF: TypeAlias = L["A", "C", "F"] | None -_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 +type _OrderKACF = L["K", "A", "C", "F"] | None +type _OrderACF = L["A", "C", "F"] | None +type _OrderCF = L["C", "F"] | None -_ModeKind: TypeAlias = L["raise", "wrap", "clip"] 
-_PartitionKind: TypeAlias = L["introselect"] +type _ModeKind = L["raise", "wrap", "clip"] +type _PartitionKind = L["introselect"] # in practice, only the first case-insensitive character is considered (so e.g. # "QuantumSort3000" will be interpreted as quicksort). -_SortKind: TypeAlias = L[ +type _SortKind = L[ "Q", "quick", "quicksort", "M", "merge", "mergesort", "H", "heap", "heapsort", "S", "stable", "stablesort", -] -_SortSide: TypeAlias = L["left", "right"] +] # fmt: skip +type _SortSide = L["left", "right"] -_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co -_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | bool_ | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | bool_ | None -_NDIterFlagsKind: TypeAlias = L[ +type _NDIterFlagsKind = L[ "buffered", "c_index", "copy_if_overlap", @@ -990,7 +914,7 @@ _NDIterFlagsKind: TypeAlias = L[ "reduce_ok", "zerosize_ok", ] -_NDIterFlagsOp: TypeAlias = L[ +type _NDIterFlagsOp = L[ "aligned", "allocate", "arraymask", @@ -1008,27 +932,23 @@ _NDIterFlagsOp: TypeAlias = L[ "writemasked", ] -_MemMapModeKind: TypeAlias = L[ - "readonly", "r", - "copyonwrite", "c", - "readwrite", "r+", - "write", "w+", -] +type _DT64Item = dt.date | int | None +type _TD64Item = dt.timedelta | int 
| None -_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] -_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"] -_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] - -_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"] -_DayUnit: TypeAlias = L["W", "D", b"W", b"D"] -_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit] -_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "Îŧs", b"h", b"m", b"s", b"ms", b"us"] -_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] -_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit] -_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit] -_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] -_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] -_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] +type _DT64Date = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] +type _DT64Now = L["NOW", "now", b"NOW", b"now"] +type _NaTValue = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] + +type _MonthUnit = L["Y", "M", b"Y", b"M"] +type _DayUnit = L["W", "D", b"W", b"D"] +type _DateUnit = L[_MonthUnit, _DayUnit] +type _NativeTimeUnit = L["h", "m", "s", "ms", "us", "Îŧs", b"h", b"m", b"s", b"ms", b"us"] +type _IntTimeUnit = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] +type _TimeUnit = L[_NativeTimeUnit, _IntTimeUnit] +type _NativeTD64Unit = L[_DayUnit, _NativeTimeUnit] +type _IntTD64Unit = L[_MonthUnit, _IntTimeUnit] +type _TD64Unit = L[_DateUnit, _TimeUnit] +type _TimeUnitSpec[UnitT: _TD64Unit] = _TD64Unit | tuple[_TD64Unit, SupportsIndex] ### TypedDict's (for internal use only) @@ -1070,33 +990,30 @@ class _SupportsFileMethods(SupportsFlush, Protocol): def seek(self, offset: int, whence: int, /) -> object: ... @type_check_only -class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... 
- -@type_check_only -class _SupportsDLPack(Protocol[_T_contra]): - def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... +class _SupportsDLPack[StreamT](Protocol): + def __dlpack__(self, /, *, stream: StreamT | None = None) -> CapsuleType: ... @type_check_only -class _HasDType(Protocol[_T_co]): +class _HasDType[DTypeT](Protocol): # DTypeT bound was intentionally left out @property - def dtype(self, /) -> _T_co: ... + def dtype(self, /) -> DTypeT: ... @type_check_only -class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasRealAndImag[RealT, ImagT](Protocol): @property - def real(self, /) -> _RealT_co: ... + def real(self, /) -> RealT: ... @property - def imag(self, /) -> _ImagT_co: ... + def imag(self, /) -> ImagT: ... @type_check_only -class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ... + def type(self, /) -> py_type[_HasRealAndImag[RealT, ImagT]]: ... @type_check_only -class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasDTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ... + def dtype(self, /) -> _HasTypeWithRealAndImag[RealT, ImagT]: ... @type_check_only class _HasDateAttributes(Protocol): @@ -1143,9 +1060,9 @@ euler_gamma: Final[float] = ... pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... -little_endian: Final[builtins.bool] = ... -False_: Final[np.bool[L[False]]] = ... -True_: Final[np.bool[L[True]]] = ... +little_endian: Final[py_bool] = ... +False_: Final[bool_[L[False]]] = ... +True_: Final[bool_[L[True]]] = ... newaxis: Final[None] = None # not in __all__ @@ -1153,13 +1070,13 @@ __NUMPY_SETUP__: Final[L[False]] = False __numpy_submodules__: Final[set[LiteralString]] = ... __former_attrs__: Final[_FormerAttrsDict] = ... 
__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... -__array_api_version__: Final[L["2024.12"]] = "2024.12" +__array_api_version__: Final[L["2025.12"]] = "2025.12" test: Final[PytestTester] = ... @type_check_only class _DTypeMeta(type): @property - def type(cls, /) -> type[generic] | None: ... + def type(cls, /) -> py_type[generic] | None: ... @property def _abstract(cls, /) -> bool: ... @property @@ -1171,31 +1088,31 @@ class _DTypeMeta(type): @final class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): - names: tuple[builtins.str, ...] | None + names: tuple[py_str, ...] | None def __hash__(self) -> int: ... # `None` results in the default dtype @overload def __new__( cls, - dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[float64 | ct.c_double] | _Float64Codes | None, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ... + metadata: dict[py_str, Any] = ... ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a - # `dtype: dtype[_ScalarT]` attribute + # `dtype: dtype[ScalarT]` attribute @overload - def __new__( + def __new__[ScalarT: generic]( cls, - dtype: _DTypeLike[_ScalarT], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _DTypeLike[ScalarT], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_ScalarT]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[ScalarT]: ... 
# Builtin types # @@ -1210,56 +1127,56 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[py_bool | bool_ | ct.c_bool] | _BoolCodes, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., - ) -> dtype[np.bool]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[bool_]: ... @overload def __new__( cls, - dtype: type[int], # also accepts `type[builtins.bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[int], # also accepts `type[py_bool]` + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., - ) -> dtype[int_ | np.bool]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[int_ | Any]: ... @overload def __new__( cls, - dtype: type[float], # also accepts `type[int | bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[float], # also accepts `type[int | bool]` + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., - ) -> dtype[float64 | int_ | np.bool]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[float64 | Any]: ... @overload def __new__( cls, - dtype: type[complex], # also accepts `type[float | int | bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[complex], # also accepts `type[float | int | bool]` + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., - ) -> dtype[complex128 | float64 | int_ | np.bool]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex128 | Any]: ... 
@overload def __new__( cls, - dtype: type[bytes | ct.c_char] | _BytesCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[bytes | ct.c_char] | _BytesCodes, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[bytes_]: ... @overload def __new__( cls, - dtype: type[str] | _StrCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[py_str] | _StrCodes, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[str_]: ... # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to # be run with the (undocumented) `--disable-memoryview-promotion` flag, @@ -1270,224 +1187,300 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[void | memoryview] | _VoidDTypeLike | _VoidCodes, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[void]: ... # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, # and is therefore not included here @overload def __new__( cls, - dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[object_]: ... 
# `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _UInt8Codes | py_type[ct.c_uint8], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint8]: ... @overload def __new__( cls, - dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _UInt16Codes | py_type[ct.c_uint16 | ct.c_ushort], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _UInt32Codes | _UIntCCodes | py_type[ct.c_uint32 | ct.c_uint], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint32]: ... @overload def __new__( cls, - dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _UInt64Codes | _ULongLongCodes | py_type[ct.c_uint64 | ct.c_ulonglong], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint64]: ... @overload def __new__( cls, - dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _UIntPCodes | py_type[ct.c_void_p | ct.c_size_t], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uintp]: ... 
@overload def __new__( cls, - dtype: _ULongCodes | type[ct.c_ulong], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _ULongCodes | py_type[ct.c_ulong], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[ulong]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[uint32 | uint64]: ... # `signedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _Int8Codes | py_type[ct.c_int8], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int8]: ... @overload def __new__( cls, - dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _Int16Codes | py_type[ct.c_int16 | ct.c_short], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int16]: ... @overload def __new__( cls, - dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _Int32Codes | _IntCCodes | py_type[ct.c_int32 | ct.c_int], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int32]: ... @overload def __new__( cls, - dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _Int64Codes | _LongLongCodes | py_type[ct.c_int64 | ct.c_longlong], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int64]: ... 
@overload def __new__( cls, - dtype: _IntPCodes | type[intp | ct.c_ssize_t], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _IntPCodes | py_type[intp | ct.c_ssize_t], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[intp]: ... @overload def __new__( cls, - dtype: _LongCodes | type[ct.c_long], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _LongCodes | py_type[ct.c_long], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[long]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[int32 | int64]: ... # `floating` string-based representations and ctypes @overload def __new__( cls, - dtype: _Float16Codes | _HalfCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _Float16Codes, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[float16]: ... @overload def __new__( cls, - dtype: _Float32Codes | _SingleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _Float32Codes | py_type[ct.c_float], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[float32]: ... # float64 codes are covered by overload 1 @overload def __new__( cls, - dtype: _LongDoubleCodes | type[ct.c_longdouble], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _LongDoubleCodes | py_type[ct.c_longdouble], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[longdouble]: ... 
- # `complexfloating` string-based representations - @overload + # `complexfloating` string-based representations and ctypes + if sys.version_info < (3, 14) or sys.platform == "win32": + @overload + def __new__( + cls, + dtype: _Complex64Codes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[clongdouble]: ... + else: + @overload + def __new__( + cls, + dtype: _Complex64Codes | py_type[ct.c_float_complex], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes | py_type[ct.c_double_complex], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes | py_type[ct.c_longdouble_complex], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[clongdouble]: ... + + # datetime64 + @overload # datetime64[{Y,M,W,D}] def __new__( cls, - dtype: _Complex64Codes | _CSingleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _DT64Codes_date, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complex64]: ... - @overload + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64[dt.date]]: ... 
+ @overload # datetime64[{h,m,s,ms,us}] def __new__( cls, - dtype: _Complex128Codes | _CDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _DT64Codes_datetime, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complex128]: ... - @overload + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64[dt.datetime]]: ... + @overload # datetime64[{ns,ps,fs,as}] + def __new__( + cls, + dtype: _DT64Codes_int, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64[int]]: ... + @overload # datetime64[?] def __new__( cls, - dtype: _CLongDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _DT64Codes_any, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[clongdouble]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64]: ... - # Miscellaneous string-based representations and ctypes - @overload + # timedelta64 + @overload # timedelta64[{W,D,h,m,s,ms,us}] def __new__( cls, - dtype: _TD64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _TD64Codes_timedelta, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[timedelta64]: ... - @overload + metadata: dict[py_str, Any] = ..., + ) -> dtype[timedelta64[dt.timedelta]]: ... + @overload # timedelta64[{Y,M,ns,ps,fs,as}] def __new__( cls, - dtype: _DT64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _TD64Codes_int, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[datetime64]: ... + metadata: dict[py_str, Any] = ..., + ) -> dtype[timedelta64[int]]: ... + @overload # timedelta64[?] 
+ def __new__( + cls, + dtype: _TD64Codes_any, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[timedelta64]: ... # `StringDType` requires special treatment because it has no scalar type @overload def __new__( cls, dtype: dtypes.StringDType | _StringCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtypes.StringDType: ... # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1495,75 +1488,75 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[unsignedinteger]: ... @overload def __new__( cls, dtype: _SignedIntegerCodes | _SignedIntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[signedinteger]: ... @overload def __new__( cls, dtype: _IntegerCodes | _IntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[integer]: ... @overload def __new__( cls, dtype: _FloatingCodes | _FloatingCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[floating]: ... 
@overload def __new__( cls, dtype: _ComplexFloatingCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complexfloating]: ... @overload def __new__( cls, dtype: _InexactCodes | _FloatingCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[inexact]: ... @overload def __new__( cls, - dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: _CharacterCodes | py_type[bytes | py_str | ct.c_char], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[character]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload def __new__( cls, - dtype: builtins.str, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_str, + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype: ... # Catch-all overload for object-likes @@ -1575,25 +1568,25 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[object], - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: py_type[object], + align: py_bool = False, + copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[object_ | Any]: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[py_str], /) -> dtype[void]: ... 
@overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... + def __getitem__(self: dtype[void], key: py_str | SupportsIndex, /) -> dtype: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload - def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + def __mul__[DTypeT: dtype](self: DTypeT, value: L[1], /) -> DTypeT: ... @overload - def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + def __mul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... @@ -1601,20 +1594,20 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # literals as of mypy 0.902. Set the return-type to `dtype` for # now for non-flexible dtypes. @overload - def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + def __rmul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __ge__(self, other: DTypeLike | None, /) -> py_bool: ... + def __lt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __le__(self, other: DTypeLike | None, /) -> py_bool: ... # Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are # identical to their `object`-based counterpart - def __eq__(self, other: Any, /) -> builtins.bool: ... - def __ne__(self, other: Any, /) -> builtins.bool: ... + def __eq__(self, other: Any, /) -> py_bool: ... 
+ def __ne__(self, other: Any, /) -> py_bool: ... @property def alignment(self) -> int: ... @@ -1631,19 +1624,19 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @property def flags(self) -> int: ... @property - def hasobject(self) -> builtins.bool: ... + def hasobject(self) -> py_bool: ... @property def isbuiltin(self) -> _DTypeBuiltinKind: ... @property - def isnative(self) -> builtins.bool: ... + def isnative(self) -> py_bool: ... @property - def isalignedstruct(self) -> builtins.bool: ... + def isalignedstruct(self) -> py_bool: ... @property def itemsize(self) -> int: ... @property def kind(self) -> _DTypeKind: ... @property - def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + def metadata(self) -> MappingProxyType[py_str, Any] | None: ... @property def name(self) -> LiteralString: ... @property @@ -1658,65 +1651,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @property def str(self) -> LiteralString: ... @property - def type(self) -> type[_ScalarT_co]: ... - -@final -class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - - @property - def base(self, /) -> _ArrayT_co: ... - @property - def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... - @property - def index(self, /) -> int: ... - - # iteration - def __len__(self, /) -> int: ... - def __iter__(self, /) -> Self: ... - def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... - - # indexing - @overload # nd: _[()] - def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... - @overload # 0d; _[] - def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... - @overload # 1d; _[[*]], _[:], _[...] - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], - key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], - /, - ) -> ndarray[tuple[int], _DTypeT]: ... 
- @overload # 2d; _[[*[*]]] - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], - key: list[list[int]], - /, - ) -> ndarray[tuple[int, int], _DTypeT]: ... - @overload # ?d - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], - key: NDArray[integer] | _NestedSequence[int], - /, - ) -> ndarray[_AnyShape, _DTypeT]: ... - - # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any - # type accepted by the relevant underlying `np.generic` constructor, which isn't - # known statically. So we cannot meaningfully annotate the value parameter. - def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... - - # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to - # avoid confusion - def __array__( - self: flatiter[ndarray[Any, _DTypeT]], - dtype: None = None, - /, - *, - copy: None = None, - ) -> ndarray[tuple[int], _DTypeT]: ... - - # This returns a flat copy of the underlying array, not of the iterator itself - def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... + def type(self) -> py_type[_ScalarT_co]: ... @type_check_only class _ArrayOrScalarCommon: @@ -1739,7 +1674,7 @@ class _ArrayOrScalarCommon: @property def device(self) -> L["cpu"]: ... - def __bool__(self, /) -> builtins.bool: ... + def __bool__(self, /) -> py_bool: ... def __int__(self, /) -> int: ... def __float__(self, /) -> float: ... def __copy__(self) -> Self: ... @@ -1768,9 +1703,9 @@ class _ArrayOrScalarCommon: self, /, *, - write: builtins.bool | None = None, - align: builtins.bool | None = None, - uic: builtins.bool | None = None, + write: py_bool | None = None, + align: py_bool | None = None, + uic: py_bool | None = None, ) -> None: ... @property @@ -1778,13 +1713,13 @@ class _ArrayOrScalarCommon: @property def __array_priority__(self) -> float: ... @property - def __array_struct__(self) -> CapsuleType: ... 
# builtins.PyCapsule + def __array_struct__(self) -> CapsuleType: ... def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape _DTypeT_co, # DType - np.bool, # F-continuous + bool_, # F-continuous bytes | list[Any], # Data ], /) -> None: ... @@ -1797,39 +1732,47 @@ class _ArrayOrScalarCommon: kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: builtins.bool | None = ..., + stable: py_bool | None = ..., ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) - def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False + ) -> OutT: ... @overload - def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False + ) -> OutT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) - def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... 
+ def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False + ) -> OutT: ... @overload - def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False + ) -> OutT: ... # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray - def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... @overload # out=None (default) def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... @overload # out=ndarray - def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + def choose[ArrayT: ndarray](self, /, choices: ArrayLike, out: ArrayT, mode: _ModeKind = "raise") -> ArrayT: ... # TODO: Annotate kwargs with an unpacked `TypedDict` @overload # out: None (default) @@ -1839,36 +1782,42 @@ class _ArrayOrScalarCommon: @overload def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... @overload # out: ndarray - def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... 
+ def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: None, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: None = None, *, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... @overload - def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... + def compress[ArrayT: ndarray](self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: ArrayT) -> ArrayT: ... @overload - def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + def compress[ArrayT: ndarray]( + self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: ArrayT + ) -> ArrayT: ... - # Keep in sync with `MaskedArray.cumprod` + # @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray - def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... 
@overload - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... - # Keep in sync with `MaskedArray.cumsum` + # @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray - def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... @overload def max( @@ -1877,32 +1826,32 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def max( + def max[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, *, - out: _ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... 
+ ) -> ArrayT: ... @overload def min( @@ -1911,32 +1860,32 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def min( + def min[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, *, - out: _ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def sum( @@ -1946,34 +1895,34 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def sum( + def sum[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
@overload - def sum( + def sum[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def prod( @@ -1983,34 +1932,34 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def prod[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def prod( + def prod[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def mean( @@ -2019,31 +1968,31 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload - def mean( + def mean[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def mean( + def mean[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def std( @@ -2053,37 +2002,37 @@ class _ArrayOrScalarCommon: out: None = None, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... @overload - def std( + def std[ArrayT: ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def std( + def std[ArrayT: ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
@overload def var( @@ -2093,37 +2042,37 @@ class _ArrayOrScalarCommon: out: None = None, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... @overload - def var( + def var[ArrayT: ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def var( + def var[ArrayT: ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @@ -2133,12 +2082,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def ndim(self) -> int: ... @property def size(self) -> int: ... + @property - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @real.setter def real(self, value: ArrayLike, /) -> None: ... + @property - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... 
+ def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @imag.setter def imag(self, value: ArrayLike, /) -> None: ... @@ -2146,26 +2097,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): cls, shape: _ShapeLike, dtype: DTypeLike | None = ..., - buffer: _SupportsBuffer | None = ..., + buffer: Buffer | None = ..., offset: SupportsIndex = ..., strides: _ShapeLike | None = ..., order: _OrderKACF = ..., ) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __buffer__(self, flags: int, /) -> memoryview: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: _dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, *inputs: Any, **kwargs: Any, ) -> Any: ... @@ -2183,13 +2134,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # grant subclasses a bit more flexibility def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: _dtype]( self, - array: ndarray[_ShapeT, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[Any, ...], int] | None = ..., - return_scalar: builtins.bool = ..., + return_scalar: py_bool = ..., /, - ) -> ndarray[_ShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... 
# Keep in sync with `MaskedArray.__getitem__` @overload @@ -2201,11 +2152,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # can be of any shape def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, _dtype[void]]: ... @overload # flexible | object_ | bool def __setitem__( - self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: ndarray[Any, _dtype[flexible | object_ | bool_] | dtypes.StringDType], key: _ToIndices, value: object, /, @@ -2257,7 +2208,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @property def shape(self) -> _ShapeT_co: ... @shape.setter - @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) + @deprecated("In-place shape modification has been deprecated in NumPy 2.5.") def shape(self, value: _ShapeLike) -> None: ... # @@ -2268,12 +2219,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def strides(self, value: _ShapeLike) -> None: ... # - def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + def byteswap(self, inplace: py_bool = ...) -> Self: ... @property def flat(self) -> flatiter[Self]: ... @overload # use the same output type as that of the underlying `generic` - def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + def item[T](self: NDArray[generic[T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> T: ... 
@overload # special casing for `StringDType`, which has no scalar type def item( self: ndarray[Any, dtypes.StringDType], @@ -2284,22 +2235,24 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `ma.MaskedArray.tolist` @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` - def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + def tolist[T](self: ndarray[tuple[Never], _dtype[generic[T]]], /) -> Any: ... @overload - def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + def tolist[T](self: ndarray[tuple[()], _dtype[generic[T]]], /) -> T: ... @overload - def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + def tolist[T](self: ndarray[tuple[int], _dtype[generic[T]]], /) -> list[T]: ... @overload - def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + def tolist[T](self: ndarray[tuple[int, int], _dtype[generic[T]]], /) -> list[list[T]]: ... @overload - def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + def tolist[T](self: ndarray[tuple[int, int, int], _dtype[generic[T]]], /) -> list[list[list[T]]]: ... @overload def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.5") + def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... @overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.5") + def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... 
# keep in sync with `ma.MaskedArray.squeeze` def squeeze( @@ -2315,209 +2268,1523 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def transpose(self, /, *axes: SupportsIndex) -> Self: ... + # keep in sync with `ndarray.argmin` (below) and `ma.MaskedArray.argmax` + @override # type: ignore[override] + @overload + def argmax( + self, + axis: None = None, + out: None = None, + *, + keepdims: L[False] = False, + ) -> intp: ... + @overload # axis: + def argmax( + self, + axis: SupportsIndex, + out: None = None, + *, + keepdims: L[False] = False, + ) -> NDArray[intp]: ... + @overload # keepdims: True + def argmax( + self, + axis: SupportsIndex | None = None, + out: None = None, + *, + keepdims: L[True], + ) -> ndarray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmax[ArrayT: NDArray[intp]]( + self, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + keepdims: py_bool = False, + ) -> ArrayT: ... + @overload # out: (positional) + def argmax[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: SupportsIndex | None, + out: ArrayT, + *, + keepdims: py_bool = False, + ) -> ArrayT: ... + + # keep in sync with `ndarray.argmax` (above) and `ma.MaskedArray.argmin` + @override # type: ignore[override] + @overload + def argmin( + self, + axis: None = None, + out: None = None, + *, + keepdims: L[False] = False, + ) -> intp: ... + @overload # axis: + def argmin( + self, + axis: SupportsIndex, + out: None = None, + *, + keepdims: L[False] = False, + ) -> NDArray[intp]: ... + @overload # keepdims: True + def argmin( + self, + axis: SupportsIndex | None = None, + out: None = None, + *, + keepdims: L[True], + ) -> ndarray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmin[ArrayT: NDArray[intp]]( + self, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + keepdims: py_bool = False, + ) -> ArrayT: ... 
+ @overload # out: (positional) + def argmin[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: SupportsIndex | None, + out: ArrayT, + *, + keepdims: py_bool = False, + ) -> ArrayT: ... + + # + # keep in sync with `ndarray.any` (below) @overload def all( self, axis: None = None, out: None = None, - keepdims: L[False, 0] = False, + keepdims: L[False] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... - @overload + ) -> bool_: ... + @overload # axis: def all( self, - axis: int | tuple[int, ...] | None = None, + axis: int | tuple[int, ...], out: None = None, - keepdims: SupportsIndex = False, + keepdims: L[False] = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... - @overload + ) -> NDArray[bool_]: ... + @overload # keepdims: True def all( self, - axis: int | tuple[int, ...] | None, - out: _ArrayT, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None = None, + out: None = None, *, + keepdims: L[True], where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... - @overload - def all( + ) -> ndarray[_ShapeT_co, dtype[bool_]]: ... + @overload # out: (keyword) + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, - keepdims: SupportsIndex = False, + out: ArrayT, + keepdims: py_bool = False, + where: _ArrayLikeBool_co = True, + ) -> ArrayT: ... + @overload # out: (positional) + def all[ArrayT: ndarray]( + self, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: py_bool = False, + *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... + # keep in sync with `ndarray.all` (above) @overload def any( self, axis: None = None, out: None = None, - keepdims: L[False, 0] = False, + keepdims: L[False] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... - @overload + ) -> bool_: ... + @overload # axis: def any( self, - axis: int | tuple[int, ...] 
| None = None, + axis: int | tuple[int, ...], out: None = None, - keepdims: SupportsIndex = False, + keepdims: L[False] = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... - @overload + ) -> NDArray[bool_]: ... + @overload # keepdims: True def any( self, - axis: int | tuple[int, ...] | None, - out: _ArrayT, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None = None, + out: None = None, *, + keepdims: L[True], where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... - @overload - def any( + ) -> ndarray[_ShapeT_co, dtype[bool_]]: ... + @overload # out: (keyword) + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, - keepdims: SupportsIndex = False, + out: ArrayT, + keepdims: py_bool = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... - - # - @overload - def partition( - self, - kth: _ArrayLikeInt, - /, - axis: SupportsIndex = -1, - kind: _PartitionKind = "introselect", - order: None = None, - ) -> None: ... - @overload - def partition( - self: NDArray[void], - kth: _ArrayLikeInt, - /, - axis: SupportsIndex = -1, - kind: _PartitionKind = "introselect", - order: str | Sequence[str] | None = None, - ) -> None: ... - - # - @overload - def argpartition( - self, - kth: _ArrayLikeInt, - /, - axis: SupportsIndex | None = -1, - kind: _PartitionKind = "introselect", - order: None = None, - ) -> NDArray[intp]: ... - @overload - def argpartition( - self: NDArray[void], - kth: _ArrayLikeInt, - /, - axis: SupportsIndex | None = -1, - kind: _PartitionKind = "introselect", - order: str | Sequence[str] | None = None, - ) -> NDArray[intp]: ... - - # keep in sync with `ma.MaskedArray.diagonal` - def diagonal( - self, - offset: SupportsIndex = 0, - axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... - - # 1D + 1D returns a scalar; - # all other with at least 1 non-0D array return an ndarray. 
- @overload - def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... - @overload - def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... - @overload - def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... - - # `nonzero()` raises for 0d arrays/generics - def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... - - @overload - def searchsorted( - self, # >= 1D array - v: _ScalarLike_co, # 0D array-like - /, - side: _SortSide = "left", - sorter: _ArrayLikeInt_co | None = None, - ) -> intp: ... - @overload - def searchsorted( - self, # >= 1D array - v: ArrayLike, - /, - side: _SortSide = "left", - sorter: _ArrayLikeInt_co | None = None, - ) -> NDArray[intp]: ... - - def sort( + ) -> ArrayT: ... + @overload # out: (positional) + def any[ArrayT: ndarray]( self, - /, - axis: SupportsIndex = -1, - kind: _SortKind | None = None, - order: str | Sequence[str] | None = None, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: py_bool = False, *, - stable: builtins.bool | None = None, - ) -> None: ... + where: _ArrayLikeBool_co = True, + ) -> ArrayT: ... - # Keep in sync with `MaskedArray.trace` - @overload - def trace( - self, # >= 2D array - /, - offset: SupportsIndex = 0, - axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, - dtype: DTypeLike | None = None, + # keep in sync with `sum` below (but without `timedelta64`) + @override # type: ignore[override] + @overload # ~number + def prod[ScalarT: number]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, out: None = None, - ) -> Any: ... - @overload - def trace( - self, # >= 2D array - /, - offset: SupportsIndex = 0, - axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, - dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... 
- @overload - def trace( - self, # >= 2D array - /, - offset: SupportsIndex, - axis1: SupportsIndex, - axis2: SupportsIndex, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~number, axis: + def prod[ScalarT: number | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~number | object_, keepdims=True + def prod[ArrayT: NDArray[number | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # bool_ + def prod( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # bool_, axis: + def prod( + self: NDArray[bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[int_]: ... + @overload # bool_, keepdims=True + def prod( + self: NDArray[bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[int_]]: ... 
+ @overload # object_ + def prod( + self: NDArray[object_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: ScalarT + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # axis: , dtype: ScalarT + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT (keyword) + def prod[ArrayT: ndarray]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # out: ArrayT (positional) + def prod[ArrayT: ndarray]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # fallback + def prod( + self: NDArray[number | bool_ | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def prod( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def prod( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... 
+ + # keep in sync with `prod` above (but also accept `timedelta64`) + @override # type: ignore[override] + @overload # ~number | timedelta64 + def sum[ScalarT: number | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~number | timedelta64, axis: + def sum[ScalarT: number | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~number | timedelta64 | object_, keepdims=True + def sum[ArrayT: NDArray[number | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # bool_ + def sum( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # bool_, axis: + def sum( + self: NDArray[bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[int_]: ... + @overload # bool_, keepdims=True + def sum( + self: NDArray[bool_], + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[int_]]: ... + @overload # object_ + def sum( + self: NDArray[object_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: ScalarT + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
+ @overload # axis: , dtype: ScalarT + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT (keyword) + def sum[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # out: ArrayT (positional) + def sum[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # fallback + def sum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def sum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def sum( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + + # keep in sync with `MaskedArray.cumprod` + @override # type: ignore[override] + @overload # number | object_ + def cumprod[DTypeT: dtype[number | object_]]( + self: ndarray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, DTypeT]: ... + @overload # bool_ + def cumprod( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, dtype[int_]]: ... + @overload # dtype: (keyword) + def cumprod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (keyword) + def cumprod( + self: NDArray[number | bool_ | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... + @overload # dtype: (positional) + def cumprod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (positional) + def cumprod( + self: NDArray[number | bool_ | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... + @overload # axis: + def cumprod[ArrayT: NDArray[number | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumprod[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[bool_]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ndarray[ShapeT, dtype[int_]]: ... 
+ @overload # axis: , dtype: + def cumprod[ShapeT: _Shape, ScalarT: generic]( + self: ndarray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[ShapeT, dtype[ScalarT]]: ... + @overload # axis: , dtype: + def cumprod[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[ShapeT]: ... + @overload # out: ndarray + def cumprod[ArrayT: ndarray]( + self: NDArray[number | bool_ | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def cumprod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + + # keep in sync with `MaskedArray.cumsum` + @override # type: ignore[override] + @overload # number | timedelta64 | object_ + def cumsum[DTypeT: dtype[number | timedelta64 | object_]]( + self: ndarray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, DTypeT]: ... + @overload # bool_ + def cumsum( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, dtype[int_]]: ... + @overload # dtype: (keyword) + def cumsum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (keyword) + def cumsum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... 
+ @overload # dtype: (positional) + def cumsum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (positional) + def cumsum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... + @overload # axis: + def cumsum[ArrayT: NDArray[number | timedelta64 | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumsum[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[bool_]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ndarray[ShapeT, dtype[int_]]: ... + @overload # axis: , dtype: + def cumsum[ShapeT: _Shape, ScalarT: generic]( + self: ndarray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[ShapeT, dtype[ScalarT]]: ... + @overload # axis: , dtype: + def cumsum[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[ShapeT]: ... + @overload # out: ndarray + def cumsum[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def cumsum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... 
+ + # + @override # type: ignore[override] + @overload # +integer | ~object_ + def mean( + self: NDArray[integer | bool_ | object_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> float64: ... + @overload # +integer, axis: + def mean( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[float64]: ... + @overload # +integer, keepdims=True + def mean( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[float64]]: ... + @overload # ~inexact | timedelta64 + def mean[ScalarT: inexact | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~inexact | timedelta64, axis: + def mean[ScalarT: inexact | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~inexact | timedelta64 | object_, keepdims=True + def mean[ArrayT: NDArray[inexact | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload # dtype: ScalarT + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # axis: , dtype: ScalarT + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT + def mean[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # fallback + def mean( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... 
+ @overload # fallback, axis: + def mean( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def mean( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + + # keep in sync with `ndarray.mean` above + @override # type: ignore[override] + @overload # +integer | ~object_ + def std( + self: NDArray[integer | bool_ | object_], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> float64: ... + @overload # +integer, axis: + def std( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[float64]: ... + @overload # +integer, keepdims=True + def std( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[float64]]: ... 
+ @overload # ~inexact | timedelta64 + def std[ScalarT: inexact | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~inexact | timedelta64, axis: + def std[ScalarT: inexact | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~inexact | timedelta64 | object_, keepdims=True + def std[ArrayT: NDArray[inexact | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload # dtype: ScalarT + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] 
| None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # axis: , dtype: ScalarT + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT + def std[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload # fallback + def std( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def std( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + + # keep in sync with `ndarray.std` above + @override # type: ignore[override] + @overload # +integer | ~object_ + def var( + self: NDArray[integer | bool_ | object_], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> float64: ... 
+ @overload # +integer, axis: + def var( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[float64]: ... + @overload # +integer, keepdims=True + def var( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[float64]]: ... + @overload # ~inexact | timedelta64 + def var[ScalarT: inexact | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~inexact | timedelta64, axis: + def var[ScalarT: inexact | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~inexact | timedelta64 | object_, keepdims=True + def var[ArrayT: NDArray[inexact | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload # dtype: ScalarT + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
+ @overload # axis: , dtype: ScalarT + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT + def var[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload # fallback + def var( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def var( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + + # keep in sync with `ndarray.amin` below + @override # type: ignore[override] + @overload # +number | timedelta64 | datetime64 + def max[ScalarT: number | bool_ | timedelta64 | datetime64]( + self: NDArray[ScalarT], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # +number | timedelta64 | datetime64 | object_, axis: + def max[ScalarT: number | bool_ | timedelta64 | datetime64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # +number | timedelta64 | datetime64 | object_, keepdims=True + def max[ArrayT: NDArray[number | bool_ | timedelta64 | datetime64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # object_ + def max( + self: NDArray[object_], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: ArrayT + def max[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | datetime64 | object_], + axis: int | tuple[int, ...] 
| None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + + # keep in sync with `ndarray.amax` above + @override # type: ignore[override] + @overload # +number | timedelta64 | datetime64 + def min[ScalarT: number | bool_ | timedelta64 | datetime64]( + self: NDArray[ScalarT], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # +number | timedelta64 | datetime64 | object_, axis: + def min[ScalarT: number | bool_ | timedelta64 | datetime64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # +number | timedelta64 | datetime64 | object_, keepdims=True + def min[ArrayT: NDArray[number | bool_ | timedelta64 | datetime64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # object_ + def min( + self: NDArray[object_], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: ArrayT + def min[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | datetime64 | object_], + axis: int | tuple[int, ...] 
| None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + + # + @overload + def partition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # keep in sync with `ma.core.MaskedArray.argpartition` + # keep roughly in sync with `_core.fromnumeric.argpartition` + @overload # axis: None + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> ndarray[tuple[int], _dtype[intp]]: ... + @overload # axis: index (default) + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... + @overload # void, axis: None + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> ndarray[tuple[int], _dtype[intp]]: ... + @overload # void, axis: index (default) + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... + + # keep in sync with `ma.MaskedArray.diagonal` + @overload # ?d (workaround) + def diagonal[DTypeT: dtype]( + self: ndarray[tuple[Never, Never, Never, Never], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, DTypeT]: ... 
+ @overload # 2d + def diagonal[DTypeT: dtype]( + self: ndarray[tuple[int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[tuple[int], DTypeT]: ... + @overload # 3d + def diagonal[DTypeT: dtype]( + self: ndarray[tuple[int, int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[tuple[int, int], DTypeT]: ... + @overload # Nd (fallback) + def diagonal( + self, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... + @overload + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... + @overload + def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... + + # keep in sync with `_core.fromnumeric.nonzero` + @overload # ?d (workaround) + def nonzero(self: ndarray[tuple[Never, Never, Never, Never]]) -> tuple[ndarray[_1D, _dtype[intp]], ...]: ... + @overload # 1d + def nonzero(self: ndarray[_1D]) -> tuple[ndarray[_1D, _dtype[intp]]]: ... + @overload # 2d + def nonzero(self: ndarray[_2D]) -> _2Tuple[ndarray[_1D, _dtype[intp]]]: ... + @overload # 3d + def nonzero(self: ndarray[_3D]) -> _3Tuple[ndarray[_1D, _dtype[intp]]]: ... + @overload # 3d + def nonzero(self) -> tuple[ndarray[_1D, _dtype[intp]], ...]: ... + + @overload + def searchsorted( + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, + ) -> NDArray[intp]: ... 
+ + def sort( + self, + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: py_bool | None = None, + ) -> None: ... + + # Keep in sync with `MaskedArray.trace` + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace[ArrayT: ndarray]( + self, # >= 2D array + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def trace[ArrayT: ndarray]( + self, # >= 2D array + /, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def take( - self: NDArray[_ScalarT], + def take[ScalarT: generic]( + self: NDArray[ScalarT], indices: _IntLike_co, /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def take( self, @@ -2528,24 +3795,24 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None = ..., *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
# keep in sync with `ma.MaskedArray.repeat` @overload @@ -2560,25 +3827,37 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) - def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: py_bool | None = None) -> Self: ... @overload # (empty_sequence) - def reshape( # type: ignore[overload-overlap] # mypy false positive + def reshape( # mypy false positive self, shape: Sequence[Never], /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d - def reshape( + def reshape[ + AnyShapeT: ( + tuple[()], # 0d + tuple[int], # 1d + tuple[int, int], # 2d + tuple[int, int, int], # 3d + tuple[int, int, int, int], # 4d + tuple[int, int, int, int, int], # 5d + tuple[int, int, int, int, int, int], # 6d + tuple[int, int, int, int, int, int, int], # 7d + tuple[int, int, int, int, int, int, int, int], # 8d + ) + ]( self, - shape: _AnyShapeT, + shape: AnyShapeT, /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... + copy: py_bool | None = None, + ) -> ndarray[AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -2586,7 +3865,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int], _DTypeT_co]: ... 
@overload # (index, index) def reshape( @@ -2596,7 +3875,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int], _DTypeT_co]: ... @overload # (index, index, index) def reshape( @@ -2607,7 +3886,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... @overload # (index, index, index, index) def reshape( @@ -2619,7 +3898,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... @overload # (int, *(index, ...)) def reshape( @@ -2628,7 +3907,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *shape: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload # (sequence[index]) def reshape( @@ -2637,55 +3916,55 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def astype( + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... 
@overload def astype( self, dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype]: ... + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, _dtype]: ... # @overload # () def view(self, /) -> Self: ... @overload # (dtype: T) - def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + def view[DTypeT: _dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload # (type: T) - def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, *, type: type[ArrayT]) -> ArrayT: ... @overload # (_: T) - def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, dtype: type[ArrayT]) -> ArrayT: ... @overload # (dtype: ?) - def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, _dtype]: ... @overload # (dtype: ?, type: T) - def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, dtype: DTypeLike, type: type[ArrayT]) -> ArrayT: ... def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> NDArray[ScalarT]: ... @overload def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... 
- def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... def __len__(self) -> int: ... - def __contains__(self, value: object, /) -> builtins.bool: ... + def __contains__(self, value: object, /) -> py_bool: ... # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. @@ -2695,115 +3974,111 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] - def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], _dtype[ScalarT]], /) -> Iterator[ScalarT]: ... @overload # == 1-d & StringDType def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... + def __iter__[DTypeT: _dtype]( + self: ndarray[tuple[int, int, *tuple[int, ...]], DTypeT], / + ) -> Iterator[ndarray[_AnyShape, DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... # @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... 
@overload - def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __lt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __le__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... 
@overload - def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __gt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... 
+ def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __ge__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # Unary ops # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... 
@overload - def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + def __abs__[ShapeT: _Shape, NBitT: NBitBase]( + self: ndarray[ShapeT, _dtype[complexfloating[NBitT]]], / + ) -> ndarray[ShapeT, _dtype[floating[NBitT]]]: ... @overload - def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 - def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 - def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... + def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... + def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # Binary ops # TODO: Support the "1d @ 1d -> scalar" case @overload - def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -2813,11 +4088,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -2828,11 +4103,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __matmul__ - def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmatmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2842,11 +4117,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -2857,21 +4132,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __mod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
# type: ignore[overload-overlap] + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __mod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -2882,21 +4159,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __mod__ - def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rmod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -2907,42 +4186,54 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: int | bool_, / + ) -> _2Tuple[ndarray[_ShapeT_co, _dtype[ScalarT]]]: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... 
# type: ignore[overload-overlap] + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + def __divmod__(self: NDArray[bool_], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __divmod__[ScalarT: floating | integer]( + self: NDArray[bool_], rhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... @overload - def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload # signature equivalent to __divmod__ - def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... 
+ def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: int | bool_, / + ) -> _2Tuple[ndarray[_ShapeT_co, _dtype[ScalarT]]]: ... @overload - def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: NDArray[bool_], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[bool_], lhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... @overload - def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... 
@overload @@ -2950,13 +4241,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__add__` @overload - def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2966,15 +4257,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2998,13 +4289,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3014,15 +4305,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -3046,13 +4337,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
+ def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3062,15 +4353,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
# type: ignore[overload-overlap] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -3084,13 +4375,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3100,15 +4391,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -3122,13 +4413,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[overload-overlap] + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3138,11 +4429,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -3153,7 +4444,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mul__( - self: ndarray[Any, dtype[character] | dtypes.StringDType], + self: ndarray[Any, _dtype[character] | dtypes.StringDType], other: _ArrayLikeInt, /, ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... 
@@ -3164,13 +4455,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3180,11 +4471,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -3195,7 +4486,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __rmul__( - self: ndarray[Any, dtype[character] | dtypes.StringDType], + self: ndarray[Any, _dtype[character] | dtypes.StringDType], other: _ArrayLikeInt, /, ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... @@ -3268,21 +4559,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__floordiv__` @overload - def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -3298,21 +4591,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rfloordiv__` @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -3326,13 +4621,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... 
@overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3344,11 +4639,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / ) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload @@ -3360,13 +4655,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... 
@overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3378,11 +4673,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / ) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... 
@overload def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload @@ -3393,9 +4688,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3404,9 +4699,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3415,9 +4710,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
@overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3426,9 +4721,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3437,9 +4732,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3448,9 +4743,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3459,9 +4754,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3470,9 +4765,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload @@ -3481,9 +4776,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3492,9 +4787,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3513,136 +4808,138 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # += @overload # type: ignore[misc] - def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iadd__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __iadd__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... 
@overload - def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __iadd__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __iadd__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + def __iadd__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... + def __iadd__[ArrayT: NDArray[bytes_]](self: ArrayT, other: _ArrayLikeBytes_co, /) -> ArrayT: ... @overload - def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... + def __iadd__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> ArrayT: ... @overload - def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __iadd__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # -= @overload # type: ignore[misc] - def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __isub__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __isub__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __isub__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... 
+ def __isub__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __isub__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # *= @overload # type: ignore[misc] - def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __imul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... + @overload + def __imul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __imul__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + def __imul__[ArrayT: NDArray[number | character]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... + def __imul__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # @= @overload # type: ignore[misc] - def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __imatmul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __imatmul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... 
@overload - def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __imatmul__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __imatmul__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imatmul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # **= @overload # type: ignore[misc] - def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __ipow__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __ipow__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __ipow__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ipow__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # /= @overload # type: ignore[misc] - def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __itruediv__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + def __itruediv__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... 
+ def __itruediv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # //= # keep in sync with `__imod__` @overload # type: ignore[misc] - def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ifloordiv__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... + def __ifloordiv__[ArrayT: NDArray[floating | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ifloordiv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # %= # keep in sync with `__ifloordiv__` @overload # type: ignore[misc] - def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __imod__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... + def __imod__[ArrayT: NDArray[floating]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... + def __imod__[ArrayT: NDArray[timedelta64]](self: ArrayT, other: _ArrayLike[timedelta64], /) -> ArrayT: ... @overload - def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imod__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # <<= # keep in sync with `__irshift__` @overload # type: ignore[misc] - def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ilshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... 
+ def __ilshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # >>= # keep in sync with `__ilshift__` @overload # type: ignore[misc] - def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __irshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __irshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # &= # keep in sync with `__ixor__` and `__ior__` @overload # type: ignore[misc] - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __iand__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __iand__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # ^= # keep in sync with `__iand__` and `__ior__` @overload # type: ignore[misc] - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ixor__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ixor__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # |= # keep in sync with `__iand__` and `__ixor__` @overload # type: ignore[misc] - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... 
+ def __ior__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ior__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ior__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # def __dlpack__( @@ -3652,7 +4949,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stream: int | Any | None = None, max_version: tuple[int, int] | None = None, dl_device: tuple[int, int] | None = None, - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> CapsuleType: ... def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... @@ -3670,46 +4967,74 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + # NOTE: Technically this doesn't exist at runtime, but it is unlikely to lead to + # type-unsafe situations (the abstract scalar types cannot be instantiated + # themselves) and is convenient to have, so we include it regardless. See + # https://github.com/numpy/numpy/issues/30445 for use-cases and discussion. + def __hash__(self, /) -> int: ... + + def __buffer__(self, flags: int, /) -> memoryview: ... + + @overload + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], _dtype[Self]]: ... + @overload + def __array__[DTypeT: _dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], _dtype[Self]]: ... 
+ @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], _dtype[Self]]: ... @overload - def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], _dtype[Self]]: ... @overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], _dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... + # @overload - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: _dtype]( self, - array: ndarray[_ShapeT, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None, return_scalar: L[False], /, - ) -> ndarray[_ShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __array_wrap__( + def __array_wrap__[ScalarT: generic]( self, - array: ndarray[tuple[()], dtype[_ScalarT]], + array: ndarray[tuple[()], _dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def __array_wrap__( + def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: _dtype]( self, - array: ndarray[_Shape1T, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> ndarray[_Shape1T, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, ScalarT: generic]( self, - array: ndarray[_ShapeT, dtype[_ScalarT]], + array: ndarray[ShapeT, _dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... 
+ ) -> ScalarT | ndarray[ShapeT, _dtype[ScalarT]]: ... @property def base(self) -> None: ... @@ -3722,7 +5047,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @property def strides(self) -> tuple[()]: ... @property - def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... + def flat(self) -> flatiter[ndarray[tuple[int], _dtype[Self]]]: ... @overload def item(self, /) -> _ItemT_co: ... @@ -3748,26 +5073,28 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] - # NOTE: this wont't raise, but won't do anything either + # NOTE: this won't raise, but won't do anything either @overload - def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") + def resize(self, /, *, refcheck: py_bool = True) -> None: ... @overload - def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... + @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: py_bool = True) -> None: ... # def byteswap(self, /, inplace: L[False] = False) -> Self: ... # @overload - def astype( + def astype[ScalarT: generic]( self, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", casting: _CastingKind = "unsafe", - subok: builtins.bool = True, - copy: builtins.bool | _CopyMode = True, - ) -> _ScalarT: ... + subok: py_bool = True, + copy: py_bool | _CopyMode = True, + ) -> ScalarT: ... 
@overload def astype( self, @@ -3775,8 +5102,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): dtype: DTypeLike | None, order: _OrderKACF = "K", casting: _CastingKind = "unsafe", - subok: builtins.bool = True, - copy: builtins.bool | _CopyMode = True, + subok: py_bool = True, + copy: py_bool | _CopyMode = True, ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, @@ -3784,12 +5111,12 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], type: type[ndarray] = ...) -> ScalarT: ... @overload def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> ScalarT: ... @overload def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @@ -3812,47 +5139,47 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... - def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... 
- def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], _dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _dtype[Self]]: ... - @overload # (() | []) + @overload # (()) def reshape( self, shape: tuple[()] | list[Never], /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> Self: ... - @overload # ((1, *(1, ...))@_ShapeT) - def reshape( + @overload # (ShapeT: (index, ...)) + def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( self, - shape: _1NShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[_1NShapeT, dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[ShapeT, _dtype[Self]]: ... @overload # (Sequence[index, ...]) # not recommended def reshape( self, @@ -3860,8 +5187,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> NDArray[Self] | Any: ... @overload # _(index) def reshape( self, @@ -3869,8 +5196,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int], _dtype[Self]]: ... @overload # _(index, index) def reshape( self, @@ -3879,8 +5206,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int], _dtype[Self]]: ... 
@overload # _(index, index, index) def reshape( self, @@ -3890,8 +5217,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int], _dtype[Self]]: ... @overload # _(index, index, index, index) def reshape( self, @@ -3902,8 +5229,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int], _dtype[Self]]: ... @overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( self, @@ -3915,8 +5242,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *sizes6_: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], _dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @@ -3929,28 +5256,28 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True + ) -> bool_: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... 
+ where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... @overload def any( @@ -3960,34 +5287,34 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True + ) -> bool_: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _dtype[Self]: ... 
-class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): +class number(generic[_NumberItemT_co], Generic[_NBitT, _NumberItemT_co]): @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @@ -4035,6 +5362,100 @@ class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... + # keep in sync with `number.sum` + @override # type: ignore[override] + @overload # out: None (default) + def prod( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Self: ... + @overload # dtype: (keyword) + def prod( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def prod( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def prod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ + # keep in sync with `number.prod` + @override # type: ignore[override] + @overload # out: None (default) + def sum( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Self: ... + @overload # dtype: (keyword) + def sum( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def sum( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def sum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def itemsize(self) -> L[1]: ... @@ -4043,41 +5464,41 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def real(self) -> Self: ... @property - def imag(self) -> np.bool[L[False]]: ... + def imag(self) -> bool_[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... + def __new__(cls, value: Never, /) -> bool_[py_bool]: ... @overload - def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... + def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... 
@overload - def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... + def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... @overload - def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + def __new__(cls, value: object, /) -> bool_[py_bool]: ... def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload - def __int__(self: np.bool[L[False]], /) -> L[0]: ... + def __int__(self: bool_[L[False]], /) -> L[0]: ... @overload - def __int__(self: np.bool[L[True]], /) -> L[1]: ... + def __int__(self: bool_[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... def __abs__(self) -> Self: ... @overload - def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + def __invert__(self: bool_[L[False]], /) -> bool_[L[True]]: ... @overload - def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __invert__(self: bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __invert__(self, /) -> np.bool: ... + def __invert__(self, /) -> bool_: ... @overload - def __add__(self, other: _NumberT, /) -> _NumberT: ... + def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __add__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __add__(self, other: int, /) -> int_: ... @overload @@ -4086,9 +5507,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __add__(self, other: complex, /) -> complex128: ... @overload - def __radd__(self, other: _NumberT, /) -> _NumberT: ... + def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __radd__(self, other: builtins.bool, /) -> bool_: ... + def __radd__(self, other: py_bool, /) -> bool_: ... @overload def __radd__(self, other: int, /) -> int_: ... 
@overload @@ -4097,7 +5518,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __radd__(self, other: complex, /) -> complex128: ... @overload - def __sub__(self, other: _NumberT, /) -> _NumberT: ... + def __sub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __sub__(self, other: int, /) -> int_: ... @overload @@ -4106,7 +5527,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __sub__(self, other: complex, /) -> complex128: ... @overload - def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + def __rsub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __rsub__(self, other: int, /) -> int_: ... @overload @@ -4115,9 +5536,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rsub__(self, other: complex, /) -> complex128: ... @overload - def __mul__(self, other: _NumberT, /) -> _NumberT: ... + def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __mul__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __mul__(self, other: int, /) -> int_: ... @overload @@ -4126,9 +5547,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __mul__(self, other: complex, /) -> complex128: ... @overload - def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rmul__(self, other: builtins.bool, /) -> bool_: ... + def __rmul__(self, other: py_bool, /) -> bool_: ... @overload def __rmul__(self, other: int, /) -> int_: ... @overload @@ -4137,9 +5558,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rmul__(self, other: complex, /) -> complex128: ... @overload - def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... 
@overload - def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + def __pow__(self, other: py_bool | bool_, mod: None = None, /) -> int8: ... @overload def __pow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4148,9 +5569,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + def __rpow__(self, other: py_bool, mod: None = None, /) -> int8: ... @overload def __rpow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4159,32 +5580,32 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + def __truediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... @overload def __truediv__(self, other: float | integer | bool_, /) -> float64: ... @overload def __truediv__(self, other: complex, /) -> complex128: ... @overload - def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + def __rtruediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... @overload def __rtruediv__(self, other: float | integer, /) -> float64: ... @overload def __rtruediv__(self, other: complex, /) -> complex128: ... @overload - def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + def __floordiv__(self, other: py_bool | bool_, /) -> int8: ... @overload def __floordiv__(self, other: int, /) -> int_: ... 
@overload def __floordiv__(self, other: float, /) -> float64: ... @overload - def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + def __rfloordiv__(self, other: py_bool, /) -> int8: ... @overload def __rfloordiv__(self, other: int, /) -> int_: ... @overload @@ -4192,9 +5613,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __floordiv__ @overload - def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + def __mod__(self, other: py_bool | bool_, /) -> int8: ... @overload def __mod__(self, other: int, /) -> int_: ... @overload @@ -4202,9 +5623,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rfloordiv__ @overload - def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rmod__(self, other: builtins.bool, /) -> int8: ... + def __rmod__(self, other: py_bool, /) -> int8: ... @overload def __rmod__(self, other: int, /) -> int_: ... @overload @@ -4212,9 +5633,9 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __mod__ @overload - def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload - def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + def __divmod__(self, other: py_bool | bool_, /) -> _2Tuple[int8]: ... @overload def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... 
@overload @@ -4222,84 +5643,84 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rmod__ @overload - def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload - def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + def __rdivmod__(self, other: py_bool, /) -> _2Tuple[int8]: ... @overload def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... @overload def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + def __lshift__(self, other: py_bool | bool_, /) -> int8: ... @overload def __lshift__(self, other: int, /) -> int_: ... @overload - def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rlshift__(self, other: builtins.bool, /) -> int8: ... + def __rlshift__(self, other: py_bool, /) -> int8: ... @overload def __rlshift__(self, other: int, /) -> int_: ... # keep in sync with __lshift__ @overload - def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + def __rshift__(self, other: py_bool | bool_, /) -> int8: ... @overload def __rshift__(self, other: int, /) -> int_: ... # keep in sync with __rlshift__ @overload - def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rrshift__(self, other: builtins.bool, /) -> int8: ... + def __rrshift__(self, other: py_bool, /) -> int8: ... 
@overload def __rrshift__(self, other: int, /) -> int_: ... @overload - def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + def __and__(self: bool_[L[False]], other: py_bool | bool_, /) -> bool_[L[False]]: ... @overload - def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... @overload - def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... @overload - def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __and__(self, other: py_bool | bool_, /) -> bool_: ... @overload - def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __and__(self, other: int, /) -> np.bool | intp: ... + def __and__(self, other: int, /) -> bool_ | intp: ... __rand__ = __and__ @overload - def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + def __xor__[ItemT: py_bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... @overload - def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __xor__(self, other: py_bool | bool_, /) -> bool_: ... @overload - def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __xor__(self, other: int, /) -> np.bool | intp: ... 
+ def __xor__(self, other: int, /) -> bool_ | intp: ... __rxor__ = __xor__ @overload - def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + def __or__(self: bool_[L[True]], other: py_bool | bool_, /) -> bool_[L[True]]: ... @overload - def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... @overload - def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __or__(self, other: py_bool | bool_, /) -> bool_: ... @overload - def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __or__(self, other: int, /) -> np.bool | intp: ... + def __or__(self, other: int, /) -> bool_ | intp: ... __ror__ = __or__ @overload @@ -4330,7 +5751,101 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... -# NOTE: This should _not_ be `Final` or a `TypeAlias` + # keep in sync with `bool.sum` + @override # type: ignore[override] + @overload # out: None (default) + def prod( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # dtype: (keyword) + def prod( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... 
+ @overload # dtype: (positional) + def prod( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def prod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + + # keep in sync with `bool.prod` + @override # type: ignore[override] + @overload # out: None (default) + def sum( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # dtype: (keyword) + def sum( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def sum( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def sum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` bool_ = bool # NOTE: The `object_` constructor returns the passed object, so instances with type @@ -4342,13 +5857,13 @@ class object_(_RealMixin, generic): @overload def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] @overload - def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __new__[AnyStrT: (LiteralString, str, bytes)](cls, value: AnyStrT, /) -> AnyStrT: ... # type: ignore[misc] @overload - def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __new__[ShapeT: _Shape](cls, value: ndarray[ShapeT, Any], /) -> ndarray[ShapeT, dtype[Self]]: ... # type: ignore[misc] @overload def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] @overload - def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] + def __new__[T](cls, value: T, /) -> T: ... # type: ignore[misc] @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] @@ -4356,10 +5871,9 @@ class object_(_RealMixin, generic): def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... - if sys.version_info >= (3, 12): - def __release_buffer__(self, buffer: memoryview, /) -> None: ... + def __release_buffer__(self, buffer: memoryview, /) -> None: ... -class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): +class integer(_IntegralMixin, _RoundMixin, number[_NBitT, int]): @abstractmethod def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... @@ -4399,7 +5913,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __xor__(self, other: _IntLike_co, /) -> integer: ... def __rxor__(self, other: _IntLike_co, /) -> integer: ... 
-class signedinteger(integer[_NBit]): +class signedinteger(integer[_NBitT]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4637,7 +6151,7 @@ int_ = intp long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] -class unsignedinteger(integer[_NBit1]): +class unsignedinteger(integer[_NBitT]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4882,24 +6396,24 @@ class unsignedinteger(integer[_NBit1]): @overload def __ror__(self, other: signedinteger, /) -> signedinteger: ... -uint8: TypeAlias = unsignedinteger[_8Bit] -uint16: TypeAlias = unsignedinteger[_16Bit] -uint32: TypeAlias = unsignedinteger[_32Bit] -uint64: TypeAlias = unsignedinteger[_64Bit] +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] -ubyte: TypeAlias = unsignedinteger[_NBitByte] -ushort: TypeAlias = unsignedinteger[_NBitShort] -uintc: TypeAlias = unsignedinteger[_NBitIntC] -uintp: TypeAlias = unsignedinteger[_NBitIntP] -uint: TypeAlias = uintp -ulong: TypeAlias = unsignedinteger[_NBitLong] -ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = uintp +ulong = unsignedinteger[_NBitLong] +ulonglong = unsignedinteger[_NBitLongLong] -class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): +class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, _InexactItemT_co]): @abstractmethod def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... -class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): +class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... 
# arithmetic ops @@ -5049,11 +6563,11 @@ class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes - def is_integer(self, /) -> builtins.bool: ... + def is_integer(self, /) -> py_bool: ... def as_integer_ratio(self, /) -> tuple[int, int]: ... -float16: TypeAlias = floating[_16Bit] -float32: TypeAlias = floating[_32Bit] +float16 = floating[_16Bit] +float32 = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] @@ -5083,7 +6597,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... @@ -5092,7 +6606,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... @@ -5101,7 +6615,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... 
+ def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... @@ -5110,7 +6624,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... @@ -5119,7 +6633,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... @@ -5128,7 +6642,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... @@ -5137,7 +6651,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
@overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5146,7 +6660,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5155,7 +6669,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __floordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5164,7 +6678,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rfloordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... 
@@ -5173,9 +6687,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5184,9 +6696,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload - def __rpow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: complexfloating[NBitT], mod: None = None, / + ) -> complexfloating[NBitT | _64Bit]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5196,16 +6708,16 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] -half: TypeAlias = float16 -single: TypeAlias = float32 -double: TypeAlias = float64 -longdouble: TypeAlias = floating[_NBitLongDouble] +half = float16 +single = float32 +double = float64 +longdouble = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. 
# It is used to clarify why `complex128`s precision is `_64Bit`, the latter # describing the two 64 bit floats representing its real and imaginary component -class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): +class complexfloating(inexact[_NBitT1, complex], Generic[_NBitT1, _NBitT2]): @overload def __new__( cls, @@ -5217,91 +6729,101 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... + def real(self) -> floating[_NBitT1]: ... @property - def imag(self) -> floating[_NBit2]: ... + def imag(self) -> floating[_NBitT2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... - def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] + def __abs__(self, /) -> floating[_NBitT1 | _NBitT2]: ... # type: ignore[override] @overload # type: ignore[override] - def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __add__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... 
+ def __radd__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __radd__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __sub__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rsub__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rsub__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... 
@overload - def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __mul__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rmul__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rmul__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __truediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... 
@overload # type: ignore[override] - def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rtruediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload def __pow__( self, other: complex | float64 | complex128, mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complex128: ... + ) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __pow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __pow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... 
@overload - def __rpow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... -complex64: TypeAlias = complexfloating[_32Bit] +complex64 = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): @property @@ -5322,38 +6844,36 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... 
@overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] -csingle: TypeAlias = complex64 -cdouble: TypeAlias = complex128 -clongdouble: TypeAlias = complexfloating[_NBitLongDouble] +csingle = complex64 +cdouble = complex128 +clongdouble = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property @@ -5362,26 +6882,60 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def nbytes(self) -> L[8]: ... @overload - def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... + def __new__(cls, value: timedelta64[_TD64ItemT_co], /) -> Self: ... + @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: _TD64ItemT_co, /) -> Self: ... @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) def __new__(cls, /) -> timedelta64[L[0]]: ... 
@overload - def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64[None]: ... + @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: L[0], /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit], /) -> timedelta64[L[0]]: ... @overload - def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: _IntLike_co, /) -> timedelta64[int]: ... @overload - def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit], /) -> timedelta64[int]: ... @overload def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: dt.timedelta | _IntLike_co, /) -> timedelta64[dt.timedelta]: ... + @overload def __new__( cls, value: dt.timedelta | _IntLike_co, - format: _TimeUnitSpec[_NativeTD64Unit] = ..., + format: _TimeUnitSpec[_NativeTD64Unit], /, ) -> timedelta64[dt.timedelta]: ... @overload - def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." 
+ ) + def __new__(cls, value: _ConvertibleToTD64, /) -> timedelta64: ... + @overload + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5395,183 +6949,295 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __pos__(self, /) -> Self: ... def __abs__(self, /) -> Self: ... + # + @overload + def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta], /) -> timedelta64: ... @overload - def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + @overload + def __add__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... @overload def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __add__[AnyDateOrTimeT: (dt.datetime, dt.date, dt.timedelta)]( + self: timedelta64[dt.timedelta], x: AnyDateOrTimeT, / + ) -> AnyDateOrTimeT: ... @overload - def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], x: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... 
# type: ignore[overload-cannot-match] + @deprecated("Adding bare integers to NumPy timedelta is deprecated, and will raise an error in the future.") + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], x: _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ + # @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... - @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta], /) -> timedelta64: ... @overload - def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... - __rmul__ = __mul__ - + def __sub__(self: timedelta64[None], b: timedelta64, /) -> timedelta64[None]: ... @overload - def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + def __sub__(self: timedelta64[int | dt.timedelta], b: timedelta64[Never], /) -> timedelta64: ... @overload - def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... 
+ def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], b: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __mod__(self, x: timedelta64, /) -> timedelta64: ... + @deprecated("Subtracting bare integers from NumPy timedelta is deprecated, and will raise an error in the future.") + def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], b: _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... - # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads - # reflect. However, mypy does not seem to like this, so we ignore the errors. - @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. @overload - def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta], /) -> timedelta64: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[None], a: timedelta64, /) -> timedelta64[None]: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] + def __rsub__[AnyDateT: (dt.datetime, dt.date)](self: timedelta64[dt.timedelta], a: AnyDateT, /) -> AnyDateT: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... 
# type: ignore[misc] + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] - - # keep in sync with __mod__ + @deprecated("Subtracting NumPy timedelta from bare integers is deprecated, and will raise an error in the future.") + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], a: _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... @overload - def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload - def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + + # @overload - def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... @overload - def __divmod__( - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __mul__(self: timedelta64[None], x: _FloatLike_co, /) -> timedelta64[None]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __mul__(self, x: _IntLike_co, /) -> Self: ... @overload - def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + def __mul__(self, x: float | floating, /) -> timedelta64[_TD64ItemT_co | None]: ... @overload - def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... 
+ __rmul__ = __mul__ - # keep in sync with __rmod__ + # keep in sync with __divmod__ @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[Never], x: timedelta64[dt.timedelta], /) -> timedelta64: ... @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... @overload - def __rdivmod__( # type: ignore[misc] - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] - + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], /) -> timedelta64[dt.timedelta | None]: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... 
+ def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __rdivmod__ + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + + # keep in sync with __mod__ @overload - def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + def __divmod__( + self: timedelta64[Never], x: timedelta64[Never] | timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __divmod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] - - # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. - # This confuses mypy, so we ignore the [misc] errors it reports. + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... 
# type: ignore[misc] + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64[dt.timedelta | None]]: ... @overload - def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload - def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @overload - def __truediv__(self, b: timedelta64, /) -> float64: ... + def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... @overload - def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... 
- @overload - def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... @overload def __rtruediv__(self, a: timedelta64, /) -> float64: ... + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... + @overload + def __floordiv__(self, b: timedelta64, /) -> int64: ... @overload def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... @overload - def __floordiv__(self, b: timedelta64, /) -> int64: ... + def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... @overload - def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... - @overload - def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... + @overload + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... - # comparison ops - + # these mypy `has_type` errors appear to be false positives + @overload + def __lt__(self, other: timedelta64, /) -> bool_: ... # type: ignore[has-type] @overload - def __lt__(self, other: _TD64Like_co, /) -> bool_: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __lt__(self, other: _IntLike_co, /) -> bool_: ... @overload - def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... 
+ @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __lt__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... @overload def __lt__(self, other: _SupportsGT, /) -> bool_: ... @overload - def __le__(self, other: _TD64Like_co, /) -> bool_: ... + def __le__(self, other: timedelta64, /) -> bool_: ... # type: ignore[has-type] @overload - def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __le__(self, other: _IntLike_co, /) -> bool_: ... @overload - def __le__(self, other: _SupportsGT, /) -> bool_: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __le__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... @overload - def __gt__(self, other: _TD64Like_co, /) -> bool_: ... + def __gt__(self, other: timedelta64, /) -> bool_: ... @overload - def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __gt__(self, other: _IntLike_co, /) -> bool_: ... @overload - def __gt__(self, other: _SupportsGT, /) -> bool_: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __gt__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... 
+ @overload + def __gt__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... @overload - def __ge__(self, other: _TD64Like_co, /) -> bool_: ... + def __ge__(self, other: timedelta64, /) -> bool_: ... @overload - def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __ge__(self, other: _IntLike_co, /) -> bool_: ... @overload - def __ge__(self, other: _SupportsGT, /) -> bool_: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __ge__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + + # keep in sync with `number.sum` + @override # type: ignore[override] + @overload # out: None (default) + def sum( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Self: ... + @overload # dtype: (keyword) + def sum( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def sum( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... 
+ @overload # out: + def sum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @property @@ -5582,9 +7248,9 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... + def __new__[AnyItemT: (dt.datetime, dt.date, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... @overload - def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ... @overload def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload @@ -5592,20 +7258,26 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... @overload - def __new__( + def __new__( # type: ignore[overload-cannot-match] cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / ) -> datetime64[dt.datetime]: ... @overload - def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... 
# type: ignore[overload-cannot-match] @overload - def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> Self: ... + # def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + # @overload - def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + def __add__(self, x: _IntLike_co, /) -> Self: ... + @overload + def __add__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... + @overload + def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ... @overload def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... @overload @@ -5615,15 +7287,20 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... @overload - def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + def __add__(self, x: timedelta64[None], /) -> datetime64[None]: ... @overload def __add__(self, x: _TD64Like_co, /) -> datetime64: ... __radd__ = __add__ + # + @overload + def __sub__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + def __sub__(self, x: _IntLike_co, /) -> Self: ... + @overload + def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... 
@overload def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... @overload @@ -5655,22 +7332,25 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): # NOTE: mypy gets confused by the non-commutativity of subtraction here @overload - def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... + @overload + def __rsub__(self, x: _IntLike_co, /) -> Self: ... @overload - def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... @overload def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... @overload - def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... @overload - def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc] + def __rsub__(self, x: datetime64, /) -> timedelta64: ... + # @overload def __lt__(self, other: datetime64, /) -> bool_: ... @overload @@ -5708,10 +7388,33 @@ class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... 
+ # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload def __getitem__(self, key: list[str], /) -> void: ... + + # def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... @@ -5728,6 +7431,10 @@ class bytes_(character[bytes], bytes): # type: ignore[misc] @overload def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... + # + @override + def __hash__(self, /) -> int: ... + # def __bytes__(self, /) -> bytes: ... @@ -5737,6 +7444,10 @@ class str_(character[str], str): # type: ignore[misc] @overload def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... + # + @override + def __hash__(self, /) -> int: ... + # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final class ufunc: @@ -5820,7 +7531,7 @@ class ufunc: *, signature: tuple[dtype | None, ...] | None = None, casting: _CastingKind | None = None, - reduction: builtins.bool = False, + reduction: py_bool = False, ) -> tuple[dtype, ...]: ... # Parameters: `__name__`, `ntypes` and `identity` @@ -5958,231 +7669,10 @@ class broadcast: def __iter__(self) -> Self: ... def reset(self) -> None: ... 
-@final -class busdaycalendar: - def __init__( - self, - /, - weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", - holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, - ) -> None: ... - @property - def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... - @property - def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... - -@final -class nditer: - @overload - def __init__( - self, - /, - op: ArrayLike, - flags: Sequence[_NDIterFlagsKind] | None = None, - op_flags: Sequence[_NDIterFlagsOp] | None = None, - op_dtypes: DTypeLike | None = None, - order: _OrderKACF = "K", - casting: _CastingKind = "safe", - op_axes: Sequence[SupportsIndex] | None = None, - itershape: _ShapeLike | None = None, - buffersize: SupportsIndex = 0, - ) -> None: ... - @overload - def __init__( - self, - /, - op: Sequence[ArrayLike | None], - flags: Sequence[_NDIterFlagsKind] | None = None, - op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, - op_dtypes: Sequence[DTypeLike | None] | None = None, - order: _OrderKACF = "K", - casting: _CastingKind = "safe", - op_axes: Sequence[Sequence[SupportsIndex]] | None = None, - itershape: _ShapeLike | None = None, - buffersize: SupportsIndex = 0, - ) -> None: ... - - def __enter__(self) -> nditer: ... - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - ) -> None: ... - def __iter__(self) -> nditer: ... - def __next__(self) -> tuple[NDArray[Any], ...]: ... - def __len__(self) -> int: ... - def __copy__(self) -> nditer: ... - @overload - def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... - @overload - def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... - def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... - def close(self) -> None: ... - def copy(self) -> nditer: ... 
- def debug_print(self) -> None: ... - def enable_external_loop(self) -> None: ... - def iternext(self) -> builtins.bool: ... - def remove_axis(self, i: SupportsIndex, /) -> None: ... - def remove_multi_index(self) -> None: ... - def reset(self) -> None: ... - @property - def dtypes(self) -> tuple[dtype, ...]: ... - @property - def finished(self) -> builtins.bool: ... - @property - def has_delayed_bufalloc(self) -> builtins.bool: ... - @property - def has_index(self) -> builtins.bool: ... - @property - def has_multi_index(self) -> builtins.bool: ... - @property - def index(self) -> int: ... - @property - def iterationneedsapi(self) -> builtins.bool: ... - @property - def iterindex(self) -> int: ... - @property - def iterrange(self) -> tuple[int, ...]: ... - @property - def itersize(self) -> int: ... - @property - def itviews(self) -> tuple[NDArray[Any], ...]: ... - @property - def multi_index(self) -> tuple[int, ...]: ... - @property - def ndim(self) -> int: ... - @property - def nop(self) -> int: ... - @property - def operands(self) -> tuple[NDArray[Any], ...]: ... - @property - def shape(self) -> tuple[int, ...]: ... - @property - def value(self) -> tuple[NDArray[Any], ...]: ... - -class memmap(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] - filename: str | None - offset: int - mode: str - @overload - def __new__( - subtype, - filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: type[uint8] = ..., - mode: _MemMapModeKind = "r+", - offset: int = 0, - shape: int | tuple[int, ...] | None = None, - order: _OrderKACF = "C", - ) -> memmap[Any, dtype[uint8]]: ... - @overload - def __new__( - subtype, - filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: _DTypeLike[_ScalarT], - mode: _MemMapModeKind = "r+", - offset: int = 0, - shape: int | tuple[int, ...] | None = None, - order: _OrderKACF = "C", - ) -> memmap[Any, dtype[_ScalarT]]: ... 
- @overload - def __new__( - subtype, - filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: DTypeLike, - mode: _MemMapModeKind = "r+", - offset: int = 0, - shape: int | tuple[int, ...] | None = None, - order: _OrderKACF = "C", - ) -> memmap[Any, dtype]: ... - def __array_finalize__(self, obj: object) -> None: ... - def __array_wrap__( - self, - array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] - context: tuple[ufunc, tuple[Any, ...], int] | None = None, - return_scalar: builtins.bool = False, - ) -> Any: ... - def flush(self) -> None: ... - -class poly1d: - @property - def variable(self) -> LiteralString: ... - @property - def order(self) -> int: ... - @property - def o(self) -> int: ... - @property - def roots(self) -> NDArray[Any]: ... - @property - def r(self) -> NDArray[Any]: ... - - @property - def coeffs(self) -> NDArray[Any]: ... - @coeffs.setter - def coeffs(self, value: NDArray[Any]) -> None: ... - - @property - def c(self) -> NDArray[Any]: ... - @c.setter - def c(self, value: NDArray[Any]) -> None: ... - - @property - def coef(self) -> NDArray[Any]: ... - @coef.setter - def coef(self, value: NDArray[Any]) -> None: ... - - @property - def coefficients(self) -> NDArray[Any]: ... - @coefficients.setter - def coefficients(self, value: NDArray[Any]) -> None: ... - - __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - - @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... - @overload - def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... - - @overload - def __call__(self, val: _ScalarLike_co) -> Any: ... - @overload - def __call__(self, val: poly1d) -> poly1d: ... - @overload - def __call__(self, val: ArrayLike) -> NDArray[Any]: ... - - def __init__( - self, - c_or_r: ArrayLike, - r: builtins.bool = False, - variable: str | None = None, - ) -> None: ... 
- def __len__(self) -> int: ... - def __neg__(self) -> poly1d: ... - def __pos__(self) -> poly1d: ... - def __mul__(self, other: ArrayLike, /) -> poly1d: ... - def __rmul__(self, other: ArrayLike, /) -> poly1d: ... - def __add__(self, other: ArrayLike, /) -> poly1d: ... - def __radd__(self, other: ArrayLike, /) -> poly1d: ... - def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted - def __sub__(self, other: ArrayLike, /) -> poly1d: ... - def __rsub__(self, other: ArrayLike, /) -> poly1d: ... - def __truediv__(self, other: ArrayLike, /) -> poly1d: ... - def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... - def __getitem__(self, val: int, /) -> Any: ... - def __setitem__(self, key: int, val: Any, /) -> None: ... - def __iter__(self) -> Iterator[Any]: ... - def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... - def integ( - self, - m: SupportsInt | SupportsIndex = 1, - k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, - ) -> poly1d: ... - def from_dlpack( x: _SupportsDLPack[None], /, *, device: L["cpu"] | None = None, - copy: builtins.bool | None = None, -) -> NDArray[number | np.bool]: ... + copy: py_bool | None = None, +) -> NDArray[number | bool_]: ... diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 067e38798718..9e652b58b938 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -24,8 +24,10 @@ uint32, uint64, ) +from numpy._utils import set_module +@set_module('numpy') class __array_namespace_info__: """ Get the array API inspection namespace for NumPy. @@ -58,8 +60,6 @@ class __array_namespace_info__: """ - __module__ = 'numpy' - def capabilities(self): """ Return a dictionary of array API library capabilities. @@ -322,7 +322,7 @@ def devices(self): """ The devices supported by NumPy. - For NumPy, this always returns ``['cpu']``. + For NumPy, this always returns ``('cpu',)``. 
Returns ------- @@ -340,7 +340,7 @@ def devices(self): -------- >>> info = np.__array_namespace_info__() >>> info.devices() - ['cpu'] + ('cpu',) """ - return ["cpu"] + return ("cpu",) diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 396125143e92..f6fc86d38dbb 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,18 +1,9 @@ -from typing import ( - Literal, - Never, - TypeAlias, - TypedDict, - TypeVar, - final, - overload, - type_check_only, -) +from typing import Literal, Never, TypedDict, final, overload, type_check_only import numpy as np -_Device: TypeAlias = Literal["cpu"] -_DeviceLike: TypeAlias = _Device | None +type _Device = Literal["cpu"] +type _DeviceLike = _Device | None _Capabilities = TypedDict( "_Capabilities", @@ -32,33 +23,22 @@ _DefaultDTypes = TypedDict( }, ) -_KindBool: TypeAlias = Literal["bool"] -_KindInt: TypeAlias = Literal["signed integer"] -_KindUInt: TypeAlias = Literal["unsigned integer"] -_KindInteger: TypeAlias = Literal["integral"] -_KindFloat: TypeAlias = Literal["real floating"] -_KindComplex: TypeAlias = Literal["complex floating"] -_KindNumber: TypeAlias = Literal["numeric"] -_Kind: TypeAlias = ( - _KindBool - | _KindInt - | _KindUInt - | _KindInteger - | _KindFloat - | _KindComplex - | _KindNumber -) - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T3 = TypeVar("_T3") -_Permute1: TypeAlias = _T1 | tuple[_T1] -_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] -_Permute3: TypeAlias = ( - tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] - | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] - | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] -) +type _KindBool = Literal["bool"] +type _KindInt = Literal["signed integer"] +type _KindUInt = Literal["unsigned integer"] +type _KindInteger = Literal["integral"] +type _KindFloat = Literal["real floating"] +type _KindComplex = Literal["complex floating"] +type _KindNumber = Literal["numeric"] +type _Kind = _KindBool | _KindInt | _KindUInt | _KindInteger | 
_KindFloat | _KindComplex | _KindNumber + +type _Permute1[T1] = T1 | tuple[T1] +type _Permute2[T1, T2] = tuple[T1, T2] | tuple[T2, T1] +type _Permute3[T1, T2, T3] = ( + tuple[T1, T2, T3] | tuple[T1, T3, T2] + | tuple[T2, T1, T3] | tuple[T2, T3, T1] + | tuple[T3, T1, T2] | tuple[T3, T2, T1] +) # fmt: skip @type_check_only class _DTypesBool(TypedDict): @@ -113,19 +93,15 @@ class _DTypesUnion(TypedDict, total=False): complex64: np.dtype[np.complex64] complex128: np.dtype[np.complex128] -_EmptyDict: TypeAlias = dict[Never, Never] +type _EmptyDict = dict[Never, Never] @final class __array_namespace_info__: - __module__: Literal["numpy"] = "numpy" + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... - def default_dtypes( - self, - *, - device: _DeviceLike = None, - ) -> _DefaultDTypes: ... + def default_dtypes(self, *, device: _DeviceLike = None) -> _DefaultDTypes: ... def devices(self) -> list[_Device]: ... @overload @@ -175,20 +151,14 @@ class __array_namespace_info__: self, *, device: _DeviceLike = None, - kind: ( - _Permute1[_KindInteger] - | _Permute2[_KindInt, _KindUInt] - ), + kind: _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt], ) -> _DTypesInteger: ... @overload def dtypes( self, *, device: _DeviceLike = None, - kind: ( - _Permute1[_KindNumber] - | _Permute3[_KindInteger, _KindFloat, _KindComplex] - ), + kind: _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex], ) -> _DTypesNumber: ... @overload def dtypes( diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index 10b282d8d9ee..000000000000 --- a/numpy/_build_utils/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Don't use the deprecated NumPy C API. Define this to a fixed version -# instead of NPY_API_VERSION in order not to break compilation for -# released SciPy versions when NumPy introduces a new deprecation. 
Use -# in setup.py:: -# -# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) -# -numpy_nodepr_api = { - "define_macros": [("NPY_NO_DEPRECATED_API", "NPY_1_9_API_VERSION")] -} - - -def import_file(folder, module_name): - """Import a file directly, avoiding importing scipy""" - import importlib - import pathlib - - fname = pathlib.Path(folder) / f'{module_name}.py' - spec = importlib.util.spec_from_file_location(module_name, str(fname)) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module diff --git a/numpy/distutils/conv_template.py b/numpy/_build_utils/conv_template.py similarity index 90% rename from numpy/distutils/conv_template.py rename to numpy/_build_utils/conv_template.py index c8933d1d4286..3f6347371ae0 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/_build_utils/conv_template.py @@ -82,8 +82,8 @@ __all__ = ['process_str', 'process_file'] import os -import sys import re +import sys # names for replacement that are already global. global_names = {} @@ -106,12 +106,12 @@ def parse_structure(astr, level): at zero. Returns an empty list if no loops found. 
""" - if level == 0 : + if level == 0: loopbeg = "/**begin repeat" loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level + else: + loopbeg = f"/**begin repeat{level}" + loopend = f"/**end repeat{level}**/" ind = 0 line = 0 @@ -124,9 +124,9 @@ def parse_structure(astr, level): start2 = astr.find("\n", start2) fini1 = astr.find(loopend, start2) fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) + line += astr.count("\n", ind, start2 + 1) + spanlist.append((start, start2 + 1, fini1, fini2 + 1, line)) + line += astr.count("\n", start2 + 1, fini2) ind = fini2 spanlist.sort() return spanlist @@ -135,10 +135,13 @@ def parse_structure(astr, level): def paren_repl(obj): torep = obj.group(1) numrep = obj.group(2) - return ','.join([torep]*int(numrep)) + return ','.join([torep] * int(numrep)) + parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") + + def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr # with 'a,b,c,a,b,c,a,b,c,a,b,c'. 
Empty braces generate @@ -155,7 +158,7 @@ def parse_values(astr): named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") exclude_vars_re = re.compile(r"(\w*)=(\w*)") exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : +def parse_loop_header(loophead): """Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, @@ -179,14 +182,13 @@ def parse_loop_header(loophead) : name = rep[0] vals = parse_values(rep[1]) size = len(vals) - if nsub is None : + if nsub is None: nsub = size - elif nsub != size : + elif nsub != size: msg = "Mismatch in number of values, %d != %d\n%s = %s" raise ValueError(msg % (nsub, size, name, vals)) names.append((name, vals)) - # Find any exclude variables excludes = [] @@ -200,30 +202,33 @@ def parse_loop_header(loophead) : # generate list of dictionaries, one for each template iteration dlist = [] - if nsub is None : + if nsub is None: raise ValueError("No substitution variables found") for i in range(nsub): tmp = {name: vals[i] for name, vals in names} dlist.append(tmp) return dlist + replace_re = re.compile(r"@(\w+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line + + +def parse_string(astr, env, level, line): + lineno = f"#line {line}\n" # local function for string replacement, uses env def replace(match): name = match.group(1) - try : + try: val = env[name] except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) + msg = f'line {line}: no definition of key "{name}"' raise ValueError(msg) from None return val code = [lineno] struct = parse_structure(astr, level) - if struct : + if struct: # recurse over inner loops oldend = 0 newlevel = level + 1 @@ -234,18 +239,18 @@ def replace(match): oldend = sub[3] newline = line + sub[4] code.append(replace_re.sub(replace, pref)) - try : + try: envlist = parse_loop_header(head) except ValueError as e: - msg = "line %d: %s" % (newline, e) + msg = f"line {newline}: {e}" raise ValueError(msg) - for 
newenv in envlist : + for newenv in envlist: newenv.update(env) newcode = parse_string(text, newenv, newlevel, newline) code.extend(newcode) suff = astr[oldend:] code.append(replace_re.sub(replace, suff)) - else : + else: # replace keys code.append(replace_re.sub(replace, astr)) code.append('\n') @@ -284,8 +289,8 @@ def process_file(source): try: code = process_str(''.join(lines)) except ValueError as e: - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None - return '#line 1 "%s"\n%s' % (sourcefile, code) + raise ValueError(f'In "{sourcefile}" loop at {e}') from None + return f'#line 1 "{sourcefile}"\n{code}' def unique_key(adict): @@ -321,9 +326,10 @@ def main(): try: writestr = process_str(allstr) except ValueError as e: - raise ValueError("In %s loop at %s" % (file, e)) from None + raise ValueError(f"In {file} loop at {e}") from None outfile.write(writestr) + if __name__ == "__main__": main() diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 8bd1ea872a42..f934c222e838 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -5,11 +5,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', 'distutils', 'conv_template.py' + 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py index e7d6b2649fb5..0d3de22ac80c 100644 --- a/numpy/_build_utils/tempita/_looper.py +++ b/numpy/_build_utils/tempita/_looper.py @@ -41,8 +41,7 @@ def __iter__(self): return looper_iter(self.seq) def __repr__(self): - return '<%s for %r>' % ( - self.__class__.__name__, self.seq) + return f'<{self.__class__.__name__} for {self.seq!r}>' class looper_iter: @@ 
-69,8 +68,7 @@ def __init__(self, seq, pos): self.pos = pos def __repr__(self): - return '' % ( - self.seq[self.pos], self.pos) + return f'' def index(self): return self.pos diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index 88ead791574b..3d5113085183 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -64,9 +64,9 @@ def __init__(self, message, position, name=None): def __str__(self): msg = " ".join(self.args) if self.position: - msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1]) + msg = f"{msg} at line {self.position[0]} column {self.position[1]}" if self.name: - msg += " in %s" % self.name + msg += f" in {self.name}" return msg @@ -140,7 +140,7 @@ def __init__( else: name = "" if lineno: - name += ":%s" % lineno + name += f":{lineno}" self.name = name self._parsed = parse( content, name=name, line_offset=line_offset, delimiters=self.delimiters @@ -185,8 +185,8 @@ def substitute(self, *args, **kw): if not hasattr(args[0], "items"): raise TypeError( "If you pass in a single argument, you must pass in a " - "dictionary-like object (with a .items() method); you gave %r" - % (args[0],) + "dictionary-like object (with a .items() method); " + f"you gave {args[0]!r}" ) kw = args[0] ns = kw @@ -278,7 +278,7 @@ def _interpret_code(self, code, ns, out, defs): elif name == "comment": return else: - assert 0, "Unknown code: %r" % name + assert 0, f"Unknown code: {name!r}" def _interpret_for(self, vars, expr, content, ns, out, defs): __traceback_hide__ = True @@ -288,8 +288,7 @@ def _interpret_for(self, vars, expr, content, ns, out, defs): else: if len(vars) != len(item): raise ValueError( - "Need %i items to unpack (got %i items)" - % (len(vars), len(item)) + f"Need {len(vars)} items to unpack (got {len(item)} items)" ) for name, value in zip(vars, item): ns[name] = value @@ -320,7 +319,7 @@ def _eval(self, code, ns, pos): try: value = eval(code, 
self.default_namespace, ns) except SyntaxError as e: - raise SyntaxError("invalid syntax in expression: %s" % code) + raise SyntaxError(f"invalid syntax in expression: {code}") return value except Exception as e: if getattr(e, "args", None): @@ -363,8 +362,8 @@ def _repr(self, value, pos): if self._unicode and isinstance(value, bytes): if not self.default_encoding: raise UnicodeDecodeError( - "Cannot decode bytes value %r into unicode " - "(no default_encoding provided)" % value + f"Cannot decode bytes value {value!r} into unicode " + "(no default_encoding provided)" ) try: value = value.decode(self.default_encoding) @@ -374,21 +373,21 @@ def _repr(self, value, pos): e.object, e.start, e.end, - e.reason + " in string %r" % value, + e.reason + f" in string {value!r}", ) elif not self._unicode and isinstance(value, str): if not self.default_encoding: raise UnicodeEncodeError( - "Cannot encode unicode value %r into bytes " - "(no default_encoding provided)" % value + f"Cannot encode unicode value {value!r} into bytes " + "(no default_encoding provided)" ) value = value.encode(self.default_encoding) return value def _add_line_info(self, msg, pos): - msg = "%s at line %s column %s" % (msg, pos[0], pos[1]) + msg = f"{msg} at line {pos[0]} column {pos[1]}" if self.name: - msg += " in file %s" % self.name + msg += f" in file {self.name}" return msg @@ -427,10 +426,8 @@ def __getitem__(self, key): return dict.__getitem__(self, key) def __repr__(self): - return "<%s %s>" % ( - self.__class__.__name__, - " ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]), - ) + items_str = " ".join([f"{k}={v!r}" for k, v in sorted(self.items())]) + return f"<{self.__class__.__name__} {items_str}>" class TemplateDef: @@ -446,12 +443,8 @@ def __init__( self._bound_self = bound_self def __repr__(self): - return "" % ( - self._func_name, - self._func_signature, - self._template.name, - self._pos, - ) + return (f"") def __str__(self): return self() @@ -486,7 +479,7 @@ def 
_parse_signature(self, args, kw): extra_kw = {} for name, value in kw.items(): if not var_kw and name not in sig_args: - raise TypeError("Unexpected argument %s" % name) + raise TypeError(f"Unexpected argument {name}") if name in sig_args: values[sig_args] = value else: @@ -503,15 +496,14 @@ def _parse_signature(self, args, kw): values[var_args] = tuple(args) break else: - raise TypeError( - "Extra position arguments: %s" % ", ".join([repr(v) for v in args]) - ) + args_str = ", ".join([repr(v) for v in args]) + raise TypeError(f"Extra position arguments: {args_str}") for name, value_expr in defaults.items(): if name not in values: values[name] = self._template._eval(value_expr, self._ns, self._pos) for name in sig_args: if name not in values: - raise TypeError("Missing argument: %s" % name) + raise TypeError(f"Missing argument: {name}") if var_kw: values[var_kw] = extra_kw return values @@ -523,7 +515,7 @@ def __init__(self, name): self.get = TemplateObjectGetter(self) def __repr__(self): - return "<%s %s>" % (self.__class__.__name__, self.__name) + return f"<{self.__class__.__name__} {self.__name}>" class TemplateObjectGetter: @@ -534,7 +526,7 @@ def __getattr__(self, attr): return getattr(self.__template_obj, attr, Empty) def __repr__(self): - return "<%s around %r>" % (self.__class__.__name__, self.__template_obj) + return f"<{self.__class__.__name__} around {self.__template_obj!r}>" class _Empty: @@ -598,18 +590,18 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): last_pos = (line_offset + 1, 1) token_re = re.compile( - r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1])) + rf"{re.escape(delimiters[0])}|{re.escape(delimiters[1])}" ) for match in token_re.finditer(s): expr = match.group(0) pos = find_position(s, match.end(), last, last_pos) if expr == delimiters[0] and in_expr: raise TemplateError( - "%s inside expression" % delimiters[0], position=pos, name=name + f"{delimiters[0]} inside expression", position=pos, 
name=name ) elif expr == delimiters[1] and not in_expr: raise TemplateError( - "%s outside expression" % delimiters[1], position=pos, name=name + f"{delimiters[1]} outside expression", position=pos, name=name ) if expr == delimiters[0]: part = s[last:match.start()] @@ -623,7 +615,7 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): last_pos = pos if in_expr: raise TemplateError( - "No %s to finish last expression" % delimiters[1], + f"No {delimiters[1]} to finish last expression", name=name, position=last_pos, ) @@ -800,12 +792,12 @@ def parse_expr(tokens, name, context=()): return parse_cond(tokens, name, context) elif expr.startswith("elif ") or expr == "else": raise TemplateError( - "%s outside of an if block" % expr.split()[0], position=pos, name=name + f"{expr.split()[0]} outside of an if block", position=pos, name=name ) elif expr in ("if", "elif", "for"): - raise TemplateError("%s with no expression" % expr, position=pos, name=name) + raise TemplateError(f"{expr} with no expression", position=pos, name=name) elif expr in ("endif", "endfor", "enddef"): - raise TemplateError("Unexpected %s" % expr, position=pos, name=name) + raise TemplateError(f"Unexpected {expr}", position=pos, name=name) elif expr.startswith("for "): return parse_for(tokens, name, context) elif expr.startswith("default "): @@ -843,7 +835,7 @@ def parse_one_cond(tokens, name, context): elif first == "else": part = ("else", pos, None, content) else: - assert 0, "Unexpected token %r at %s" % (first, pos) + assert 0, f"Unexpected token {first!r} at {pos}" while 1: if not tokens: raise TemplateError("No {{endif}}", position=pos, name=name) @@ -867,11 +859,11 @@ def parse_for(tokens, name, context): first = first[3:].strip() match = in_re.search(first) if not match: - raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name) + raise TemplateError(f'Bad for (no "in") in {first!r}', position=pos, name=name) vars = first[: match.start()] if "(" in 
vars: raise TemplateError( - "You cannot have () in the variable section of a for loop (%r)" % vars, + f"You cannot have () in the variable section of a for loop ({vars!r})", position=pos, name=name, ) @@ -893,7 +885,7 @@ def parse_default(tokens, name, context): parts = first.split("=", 1) if len(parts) == 1: raise TemplateError( - "Expression must be {{default var=value}}; no = found in %r" % first, + f"Expression must be {{{{default var=value}}}}; no = found in {first!r}", position=pos, name=name, ) @@ -904,7 +896,7 @@ def parse_default(tokens, name, context): ) if not var_re.search(var): raise TemplateError( - "Not a valid variable name for {{default}}: %r" % var, + f"Not a valid variable name for {{{{default}}}}: {var!r}", position=pos, name=name, ) @@ -930,7 +922,7 @@ def parse_def(tokens, name, context): sig = ((), None, None, {}) elif not first.endswith(")"): raise TemplateError( - "Function definition doesn't end with ): %s" % first, + f"Function definition doesn't end with ): {first}", position=start, name=name, ) @@ -976,7 +968,7 @@ def get_token(pos=False): tok_type, tok_string = get_token() if tok_type != tokenize.NAME: raise TemplateError( - "Invalid signature: (%s)" % sig_text, position=pos, name=name + f"Invalid signature: ({sig_text})", position=pos, name=name ) var_name = tok_string tok_type, tok_string = get_token() @@ -994,7 +986,7 @@ def get_token(pos=False): continue if var_arg_type is not None: raise TemplateError( - "Invalid signature: (%s)" % sig_text, position=pos, name=name + f"Invalid signature: ({sig_text})", position=pos, name=name ) if tok_type == tokenize.OP and tok_string == "=": nest_type = None @@ -1009,7 +1001,7 @@ def get_token(pos=False): end_pos = e if tok_type == tokenize.ENDMARKER and nest_count: raise TemplateError( - "Invalid signature: (%s)" % sig_text, position=pos, name=name + f"Invalid signature: ({sig_text})", position=pos, name=name ) if not nest_count and ( tok_type == tokenize.ENDMARKER @@ -1098,7 +1090,7 @@ def 
fill_command(args=None): vars.update(os.environ) for value in args: if "=" not in value: - print("Bad argument: %r" % value) + print(f"Bad argument: {value!r}") sys.exit(2) name, value = value.split("=", 1) if name.startswith("py:"): diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 18b250f9972b..ede50aaeefc3 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -13,9 +13,11 @@ # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core env_added = [] -for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: +for envkey in ['OPENBLAS_MAIN_FREE']: if envkey not in os.environ: - os.environ[envkey] = '1' + # Note: using `putenv` (and `unsetenv` further down) instead of updating + # `os.environ` on purpose to avoid a race condition, see gh-30627. + os.putenv(envkey, '1') env_added.append(envkey) try: @@ -83,7 +85,7 @@ raise ImportError(msg) from exc finally: for envkey in env_added: - del os.environ[envkey] + os.unsetenv(envkey) del envkey del env_added del os diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 6b937389defe..63ede7cc2c0b 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -418,11 +418,11 @@ original data when the :meth:`~object.__exit__` function is called but not before: - >>> a = np.arange(6, dtype='i4')[::-2] + >>> a = np.arange(6, dtype=np.int32)[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: + ... op_dtypes=[np.dtype(np.float32)]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here @@ -939,7 +939,7 @@ ``NPY_MAXDIMS``). Setting ``ndmax`` stops recursion at the specified depth, preserving deeper nested structures as objects instead of promoting them to - higher-dimensional arrays. In this case, ``dtype=object`` is required. + higher-dimensional arrays. In this case, ``dtype=np.object_`` is required. .. 
versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} @@ -994,7 +994,7 @@ Type provided: - >>> np.array([1, 2, 3], dtype=complex) + >>> np.array([1, 2, 3], dtype=np.complex128) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: @@ -1015,14 +1015,14 @@ Limiting the maximum dimensions with ``ndmax``: - >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=2) >>> a array([[1, 2], [3, 4]], dtype=object) >>> a.shape (2, 2) - >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=1) >>> b array([list([1, 2]), list([3, 4])], dtype=object) >>> b.shape @@ -1389,7 +1389,7 @@ array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - >>> np.empty([2, 2], dtype=int) + >>> np.empty([2, 2], dtype=np.int_) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized @@ -1455,7 +1455,7 @@ >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) - >>> np.zeros((5,), dtype=int) + >>> np.zeros((5,), dtype=np.int_) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) @@ -1482,10 +1482,12 @@ """) -# Signature can be updated for 2.5.0 release, see gh-30235 for details add_newdoc('numpy._core.multiarray', 'fromstring', """ - fromstring(string, dtype=float, count=-1, *, sep, like=None) + fromstring(string, dtype=None, count=-1, *, sep, like=None) + -- + + fromstring(string, dtype=np.float64, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. 
@@ -1536,9 +1538,9 @@ Examples -------- >>> import numpy as np - >>> np.fromstring('1 2', dtype=int, sep=' ') + >>> np.fromstring('1 2', dtype=np.int_, sep=' ') array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') + >>> np.fromstring('1, 2', dtype=np.int_, sep=',') array([1, 2]) """) @@ -1647,7 +1649,7 @@ fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) -- - fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + fromfile(file, dtype=np.float64, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1735,7 +1737,7 @@ frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) -- - frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) + frombuffer(buffer, dtype=np.float64, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1768,7 +1770,7 @@ If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: - >>> dt = np.dtype(int) + >>> dt = np.dtype(np.int_) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP @@ -1923,9 +1925,9 @@ `start` is much larger than `step`. This can lead to unexpected behaviour. For example:: - >>> np.arange(0, 5, 0.5, dtype=int) + >>> np.arange(0, 5, 0.5, dtype=np.int_) array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - >>> np.arange(-3, 3, 0.5, dtype=int) + >>> np.arange(-3, 3, 0.5, dtype=np.int_) array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) In such cases, the use of `numpy.linspace` should be preferred. 
@@ -2028,16 +2030,16 @@ Examples -------- >>> import numpy as np - >>> np.promote_types('f4', 'f8') + >>> np.promote_types(np.float32, np.float64) dtype('float64') - >>> np.promote_types('i8', 'f4') + >>> np.promote_types(np.int64, np.float32) dtype('float64') >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + >>> np.promote_types(np.int32, 'S8') dtype('S11') An example of a non-associative case: @@ -2374,7 +2376,7 @@ ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) -- - ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) + ndarray(shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2483,7 +2485,7 @@ First mode, `buffer` is None: >>> import numpy as np - >>> np.ndarray(shape=(2,2), dtype=float, order='F') + >>> np.ndarray(shape=(2,2), dtype=np.float64, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2491,7 +2493,7 @@ >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element + ... dtype=np.int_) # offset = 1*itemsize, i.e. skip first element array([2, 3]) """) @@ -2663,6 +2665,11 @@ """ The imaginary part of the array. + Returns a view into the original array for complex arrays. + For non-complex arrays, returns a zero array of the same dtype. + For ``object`` arrays returns elementwise ``.imag`` or ``0`` + if ``.imag`` is undefined. + Examples -------- >>> import numpy as np @@ -2854,6 +2861,9 @@ """ The real part of the array. + Usually returns a view into the original array, but returns + elementwise ``.real`` for arrays of objects. + Examples -------- >>> import numpy as np @@ -2883,7 +2893,7 @@ .. 
warning:: - Setting ``arr.shape`` is discouraged and may be deprecated in the + Setting ``arr.shape`` is deprecated and may be removed in the future. Using `ndarray.reshape` is the preferred approach. Examples @@ -2895,20 +2905,6 @@ >>> y = np.zeros((2, 3, 4)) >>> y.shape (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: cannot reshape array of size 24 into shape (3,6) - >>> np.zeros((4,2))[::2].shape = (-1,) - Traceback (most recent call last): - File "", line 1, in - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. See Also -------- @@ -3571,15 +3567,15 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x array([1. , 2. , 2.5]) - >>> x.astype(int) + >>> x.astype(np.int_) array([1, 2, 2]) - >>> x.astype(int, casting="same_value") + >>> x.astype(np.int_, casting="same_value") Traceback (most recent call last): ... ValueError: could not cast 'same_value' double to long - >>> x[:2].astype(int, casting="same_value") + >>> x[:2].astype(np.int_, casting="same_value") array([1, 2]) """) @@ -3749,12 +3745,12 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> y.flags['C_CONTIGUOUS'] True - For arrays containing Python objects (e.g. dtype=object), + For arrays containing Python objects (e.g. dtype=np.object_), the copy is a shallow one. 
The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> b = a.copy() >>> b[2][0] = 10 >>> a @@ -3764,7 +3760,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: use `copy.deepcopy`: >>> import copy - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c @@ -3866,7 +3862,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: to a single array element. The following is a rare example where this distinction is important: - >>> a = np.array([None, None], dtype=object) + >>> a = np.array([None, None], dtype=np.object_) >>> a[0] = np.array(3) >>> a array([array(3), None], dtype=object) @@ -4018,7 +4014,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: For an array with object dtype, elements are returned as-is. - >>> a = np.array([np.int64(1)], dtype=object) + >>> a = np.array([np.int64(1)], dtype=np.object_) >>> a.item() #return np.int64 np.int64(1) """) @@ -4176,6 +4172,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: Shape of resized array. refcheck : bool, optional If False, reference count will not be checked. Default is True. + See Notes below for more explanation. Returns ------- @@ -4184,15 +4181,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: Raises ------ ValueError - If `a` does not own its own data or references or views to it exist, - and the data memory must be changed. - PyPy only: will always raise if the data memory must be changed, since - there is no reliable way to determine if references or views to it - exist. - - SystemError - If the `order` keyword argument is specified. This behaviour is a - bug in NumPy. + If `a` does not own its own data, or references or views to it may exist.
See Also -------- @@ -4205,12 +4194,29 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: Only contiguous arrays (data elements consecutive in memory) can be resized. + Reallocating arrays in-place can often lead to memory fragmentation and + should be avoided. If the goal is to reclaim over-allocated memory, + alternatives are to create a view or a copy of just the desired data, or + using two passes to build the array: one to cheaply determine the shape and + another to allocate and fill. Benchmark your use case to determine what is + optimum. You may be surprised to find ``resize`` actually slows down or + bloats your application. + The purpose of the reference count check is to make sure you do not use this array as a buffer for another Python object and then - reallocate the memory. However, reference counts can increase in - other ways so if you are sure that you have not shared the memory - for this array with another Python object, then you may safely set - `refcheck` to False. + reallocate the memory. + + On Python 3.13 and older, the check allows objects with exactly one + reference to be reallocated in-place. On Python 3.14 and newer, the array + must be uniquely referenced. See [1]_ for more details. + + If you are sure that you have not shared the memory for this array with + another Python object, then you may safely set `refcheck` to False. + + + References + ---------- + .. [1] Python 3.14 What's New, https://docs.python.org/3/whatsnew/3.14.html#whatsnew314-refcount Examples -------- @@ -4778,7 +4784,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: .. note:: Passing None for ``dtype`` is different from omitting the parameter, since the former invokes ``dtype(None)`` which is an alias for - ``dtype('float64')``. + ``dtype(np.float64)``. 
Parameters ---------- @@ -4966,32 +4972,6 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: raise a TypeError """) -add_newdoc('numpy._core.umath', '_add_newdoc_ufunc', - """ - add_ufunc_docstring(ufunc, new_docstring) - - Replace the docstring for a ufunc with new_docstring. - This method will only work if the current docstring for - the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) - - Parameters - ---------- - ufunc : numpy.ufunc - A ufunc whose current doc is NULL. - new_docstring : string - The new docstring for the ufunc. - - Notes - ----- - This method allocates memory for new_docstring on - the heap. Technically this creates a memory leak, since this - memory will not be reclaimed until the end of the program - even if the ufunc itself is removed. However this will only - be a problem if the user is repeatedly creating ufuncs with - no documentation, adding documentation via add_newdoc_ufunc, - and then throwing away the ufunc. - """) - add_newdoc('numpy._core.multiarray', 'get_handler_name', """ get_handler_name(a: ndarray) -> str | None @@ -5865,8 +5845,8 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: This API requires passing dtypes, define them for convenience: >>> import numpy as np - >>> int32 = np.dtype("int32") - >>> float32 = np.dtype("float32") + >>> int32 = np.dtype(np.int32) + >>> float32 = np.dtype(np.float32) The typical ufunc call does not pass an output dtype. 
`numpy.add` has two inputs and one output, so leave the output as ``None`` (not provided): @@ -6093,11 +6073,11 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> x = np.dtype('i4') + >>> x = np.dtype(np.int32) >>> x.alignment 4 - >>> x = np.dtype(float) + >>> x = np.dtype(np.float64) >>> x.alignment 8 @@ -6122,11 +6102,11 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i2') + >>> dt = np.dtype(np.int16) >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder + >>> np.dtype(np.int8).byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder @@ -6272,13 +6252,13 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i2') + >>> dt = np.dtype(np.int16) >>> dt.isbuiltin 1 - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.isbuiltin 1 - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.isbuiltin 0 @@ -6346,13 +6326,13 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i4') + >>> dt = np.dtype(np.int32) >>> dt.kind 'i' - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.kind 'f' - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.kind 'V' @@ -6518,7 +6498,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x.subdtype (dtype('float32'), (8,)) - >>> x = np.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.subdtype >>> @@ -6540,7 +6520,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x.base dtype('float32') - >>> x = np.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.base dtype('int16') diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 6bef69d8e4ea..07adc83fbcff 100644 --- 
a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -1,30 +1,28 @@ from collections.abc import Iterable -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc __all__ = ["require"] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -_Requirements: TypeAlias = Literal[ +type _Requirements = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA" ] -_E: TypeAlias = Literal["E", "ENSUREARRAY"] -_RequirementsWithE: TypeAlias = _Requirements | _E +type _E = Literal["E", "ENSUREARRAY"] +type _RequirementsWithE = _Requirements | _E @overload -def require( - a: _ArrayT, +def require[ArrayT: NDArray[Any]]( + a: ArrayT, dtype: None = None, requirements: _Requirements | Iterable[_Requirements] | None = None, *, like: _SupportsArrayFunc | None = None -) -> _ArrayT: ... +) -> ArrayT: ... @overload def require( a: object, diff --git a/numpy/_core/_dtype.py b/numpy/_core/_dtype.py index 6a8a091b269c..9ae361fe651c 100644 --- a/numpy/_core/_dtype.py +++ b/numpy/_core/_dtype.py @@ -116,15 +116,15 @@ def _scalar_str(dtype, short): if _isunsized(dtype): return "'S'" else: - return "'S%d'" % dtype.itemsize + return f"'S{dtype.itemsize}'" elif dtype.type == np.str_: if _isunsized(dtype): return f"'{byteorder}U'" else: - return "'%sU%d'" % (byteorder, dtype.itemsize / 4) + return f"'{byteorder}U{dtype.itemsize // 4}'" - elif dtype.type == str: + elif dtype.type is str: return "'T'" elif not type(dtype)._legacy: @@ -136,7 +136,7 @@ def _scalar_str(dtype, short): if _isunsized(dtype): return "'V'" else: - return "'V%d'" % dtype.itemsize + return f"'V{dtype.itemsize}'" elif dtype.type == np.datetime64: return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" @@ -150,11 +150,11 @@ def _scalar_str(dtype, short): elif np.issubdtype(dtype, np.number): # Short repr with endianness, like ' str: ... 
# @overload -def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +def _unpack_field[T](dtype: np.dtype, offset: int, title: T) -> tuple[np.dtype, int, T]: ... @overload def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index b340fde3e463..dd559be44fee 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -1,17 +1,11 @@ from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload +from typing import Any, Final, overload import numpy as np from numpy import _CastingKind ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - class UFuncTypeError(TypeError): ufunc: Final[np.ufunc] def __init__(self, /, ufunc: np.ufunc) -> None: ... @@ -21,7 +15,7 @@ class _UFuncNoLoopError(UFuncTypeError): def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype, np.dtype] + dtypes: tuple[np.dtype, np.dtype] # pyrefly: ignore[bad-override] def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... class _UFuncCastingError(UFuncTypeError): @@ -48,7 +42,7 @@ class _ArrayMemoryError(MemoryError): def _size_to_string(num_bytes: int) -> str: ... @overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... +def _unpack_tuple[T](tup: tuple[T]) -> T: ... @overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... +def _unpack_tuple[TupleT: tuple[()] | tuple[Any, Any, *tuple[Any, ...]]](tup: TupleT) -> TupleT: ... +def _display_as_base[ExceptionT: Exception](cls: type[ExceptionT]) -> type[ExceptionT]: ... 
diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 7c64daf30dbd..1fdbd8b37a07 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -20,8 +20,6 @@ except ImportError: ctypes = None -IS_PYPY = sys.implementation.name == 'pypy' - if sys.byteorder == 'little': _nbo = '<' else: @@ -171,8 +169,8 @@ def _commastring(astr): mo = sep_re.match(astr, pos=startindex) if not mo: raise ValueError( - 'format number %d of "%s" is not recognized' % - (len(result) + 1, astr)) + f'format number {len(result) + 1} of "{astr}" ' + 'is not recognized') startindex = mo.end() islist = True @@ -691,7 +689,7 @@ def __dtype_from_pep3118(stream, is_subdtype): is_padding = (typechar == 'x') dtypechar = type_map[typechar] if dtypechar in 'USV': - dtypechar += '%d' % itemsize + dtypechar += f'{itemsize}' itemsize = 1 numpy_byteorder = {'@': '=', '^': '='}.get( stream.byteorder, stream.byteorder) @@ -949,12 +947,8 @@ def npy_ctypes_check(cls): try: # ctypes class are new-style, so have an __mro__. This probably fails # for ctypes classes with multiple inheritance. - if IS_PYPY: - # (..., _ctypes.basics._CData, Bufferable, object) - ctype_base = cls.__mro__[-3] - else: - # # (..., _ctypes._CData, object) - ctype_base = cls.__mro__[-2] + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] # right now, they're part of the _ctypes module return '_ctypes' in ctype_base.__module__ except Exception: diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 6e37022ffd56..777bcd5561b2 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -8,15 +8,10 @@ import numpy as np import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) -_T_co = TypeVar("_T_co", covariant=True) -_CT = TypeVar("_CT", bound=ct._CData) _PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) ### -IS_PYPY: Final[bool] = ... - format_re: Final[re.Pattern[str]] = ... 
sep_re: Final[re.Pattern[str]] = ... space_re: Final[re.Pattern[str]] = ... @@ -43,18 +38,18 @@ class _ctypes(Generic[_PT_co]): def _as_parameter_(self) -> ct.c_void_p: ... # - def data_as(self, /, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def data_as[CastT: ct._CanCastTo](self, /, obj: type[CastT]) -> CastT: ... + def shape_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... + def strides_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... -class dummy_ctype(Generic[_T_co]): - _cls: type[_T_co] +class dummy_ctype[T_co]: + _cls: type[T_co] - def __init__(self, /, cls: type[_T_co]) -> None: ... + def __init__(self, /, cls: type[T_co]) -> None: ... def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __mul__(self, other: object, /) -> Self: ... - def __call__(self, /, *other: object) -> _T_co: ... + def __call__(self, /, *other: object) -> T_co: ... def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi index 3c80683f003b..651c78d3530b 100644 --- a/numpy/_core/_methods.pyi +++ b/numpy/_core/_methods.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Any, Concatenate, TypeAlias +from typing import Any, Concatenate import numpy as np @@ -7,7 +7,7 @@ from . 
import _exceptions as _exceptions ### -_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] +type _Reduce2 = Callable[Concatenate[object, ...], Any] ### diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 51c8e6ca2677..943955705083 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -72,7 +72,6 @@ "complex": "complex128", "object": "object_", "bytes": "bytes_", - "a": "bytes_", "int": "int_", "str": "str_", "unicode": "str_", diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index e28541cc8987..c7efe989caa5 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,5 +1,5 @@ from collections.abc import Collection -from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only +from typing import Final, Literal as L, TypedDict, type_check_only import numpy as np @@ -35,7 +35,7 @@ class _CNamesDict(TypedDict): c_names_dict: Final[_CNamesDict] -_AbstractTypeName: TypeAlias = L[ +type _AbstractTypeName = L[ "generic", "flexible", "character", diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index b16147c18ee6..6a7476670d95 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -57,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate + Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: @@ -68,6 +69,8 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. + **Concurrency note:** see :ref:`fp_error_handling` + .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples @@ -127,6 +130,8 @@ def geterr(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. 
+ **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- >>> import numpy as np @@ -172,6 +177,10 @@ def setbufsize(size): bufsize : int Previous size of ufunc buffer in bytes. + Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- When exiting a `numpy.errstate` context manager the bufsize is restored: @@ -205,6 +214,12 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples -------- >>> import numpy as np @@ -256,6 +271,11 @@ def seterrcall(func): -------- seterr, geterr, geterrcall + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- Callback upon error: @@ -331,6 +351,8 @@ def geterrcall(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np @@ -399,6 +421,8 @@ class errstate: For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. 
+ **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index f1f0d88fe165..039aa1d51223 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,7 +1,7 @@ from _typeshed import SupportsWrite from collections.abc import Callable from types import TracebackType -from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only +from typing import Any, Final, Literal, TypedDict, type_check_only __all__ = [ "seterr", @@ -13,10 +13,8 @@ __all__ = [ "errstate", ] -_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] - -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] @type_check_only class _ErrDict(TypedDict): @@ -45,7 +43,7 @@ class errstate: under: _ErrKind | None = None, invalid: _ErrKind | None = None, ) -> None: ... - def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __call__[FuncT: Callable[..., object]](self, /, func: FuncT) -> FuncT: ... def __enter__(self) -> None: ... def __exit__( self, diff --git a/numpy/_core/_umath_tests.pyi b/numpy/_core/_umath_tests.pyi index 696cec3b755e..cce84afdd081 100644 --- a/numpy/_core/_umath_tests.pyi +++ b/numpy/_core/_umath_tests.pyi @@ -30,6 +30,10 @@ def test_signature( # undocumented def test_dispatch() -> _TestDispatchResult: ... +# undocumented test helpers for PyUFunc_ReplaceLoopBySignature +def replace_loop(ufunc: np.ufunc, /) -> object: ... +def restore_loop(ufunc: np.ufunc, capsule: object, /) -> None: ... + # undocumented ufuncs and gufuncs always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... 
always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 8d576d9e1d56..6cab73beaa4f 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -78,7 +78,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if legacy is False: options['legacy'] = sys.maxsize - elif legacy == False: # noqa: E712 + elif legacy == False: warnings.warn( f"Passing `legacy={legacy!r}` is deprecated.", FutureWarning, stacklevel=3 @@ -248,6 +248,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- get_printoptions, printoptions, array2string + Notes ----- @@ -255,6 +256,8 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, * Use `printoptions` as a context manager to set the values temporarily. * These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + Examples -------- Floating point precision can be set: @@ -357,6 +360,8 @@ def get_printoptions(): ----- These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + See Also -------- set_printoptions, printoptions @@ -419,6 +424,8 @@ def printoptions(*args, **kwargs): ----- These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + """ token = _set_printoptions(*args, **kwargs) @@ -523,8 +530,16 @@ def _get_format_function(data, **options): dtype_ = data.dtype dtypeobj = dtype_.type formatdict = _get_formatdict(data, **options) + if dtypeobj is None: return formatdict["numpystr"]() + elif (getattr(dtypeobj, "__module__", None) != "numpy" + and not issubclass(dtypeobj, str)): + # Use `str()` as a default format for non-NumPy dtypes. This should be + # improved. 
We use `str` assuming that `repr` is likely to duplicate + # information that is contained in the dtype. + # (Do this early, because e.g. quaddtype subclasses floating.) + return formatdict['void']() elif issubclass(dtypeobj, _nt.bool): return formatdict['bool']() elif issubclass(dtypeobj, _nt.integer): @@ -957,7 +972,7 @@ def recurser(index, hanging_indent, curr_width): finally: # recursive closures have a cyclic reference to themselves, which # requires gc to collect (gh-10620). To avoid this problem, for - # performance and PyPy friendliness, we break the cycle: + # performance, we break the cycle: recurser = None def _none_or_positive_arg(x, name): @@ -1408,10 +1423,11 @@ def __call__(self, x): return super().__call__(x) def _format_non_nat(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) + datetime_str = datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + return f"'{datetime_str}'" class TimedeltaFormat(_TimelikeFormat): diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 307f844634ca..d06c38539306 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -1,17 +1,11 @@ +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import object as py_object from collections.abc import Callable # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager -from typing import ( - Any, - Final, - Literal, - SupportsIndex, - TypeAlias, - TypedDict, - type_check_only, -) +from typing import Any, Final, Literal, SupportsIndex, TypedDict, type_check_only import numpy as np from numpy._typing import NDArray, _CharLike_co, _FloatLike_co @@ -29,12 +23,12 @@ __all__ = [ ### -_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", 
"maxprec_equal"] -_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] -_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] -_Sign: TypeAlias = Literal["-", "+", " "] -_Trim: TypeAlias = Literal["k", ".", "0", "-"] -_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] +type _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +type _LegacyNoStyle = Literal["1.21", "1.25", "2.1", False] +type _Legacy = Literal["1.13", _LegacyNoStyle] +type _Sign = Literal["-", "+", " "] +type _Trim = Literal["k", ".", "0", "-"] +type _ReprFunc = Callable[[NDArray[Any]], str] @type_check_only class _FormatDict(TypedDict, total=False): @@ -48,8 +42,8 @@ class _FormatDict(TypedDict, total=False): longcomplexfloat: Callable[[np.clongdouble], str] void: Callable[[np.void], str] numpystr: Callable[[_CharLike_co], str] - object: Callable[[object], str] - all: Callable[[object], str] + object: Callable[[py_object], str] + all: Callable[[py_object], str] int_kind: Callable[[np.integer], str] float_kind: Callable[[np.floating], str] complex_kind: Callable[[np.complexfloating], str] diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index 3a480dfd4ab3..b058875d0455 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -83,4 +83,5 @@ # Version 21 (NumPy 2.4.0) # Add 'same_value' casting, header additions. 
# General loop registration for ufuncs, sort, and argsort +# Version 21 (NumPy 2.5.0) No change 0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index e97177e46153..e5c041d1af8b 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -17,11 +17,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', '..', 'distutils', 'conv_template.py' + '..', '..', '_build_utils', 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path @@ -132,9 +132,9 @@ def __init__(self, arg): def __str__(self): try: - return ' '.join('NPY_STEALS_REF_TO_ARG(%d)' % x for x in self.arg) + return ' '.join(f'NPY_STEALS_REF_TO_ARG({x})' for x in self.arg) except TypeError: - return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg + return f'NPY_STEALS_REF_TO_ARG({self.arg})' class Function: @@ -337,10 +337,8 @@ def __init__(self, name, index, ptr_cast, api_name, internal_type=None): self.internal_type = internal_type def define_from_array_api_string(self): - return "#define %s (*(%s *)%s[%d])" % (self.name, - self.ptr_cast, - self.api_name, - self.index) + return (f"#define {self.name} (*({self.ptr_cast} *)" + f"{self.api_name}[{self.index}])") def array_api_define(self): return f" (void *) &{self.name}" @@ -369,10 +367,7 @@ def __init__(self, name, index, type, api_name): self.api_name = api_name def define_from_array_api_string(self): - return "#define %s (*(%s *)%s[%d])" % (self.name, - self.type, - self.api_name, - self.index) + return f"#define {self.name} (*({self.type} *){self.api_name}[{self.index}])" def array_api_define(self): return f" ({self.type} *) &{self.name}" @@ -392,10 +387,7 @@ def __init__(self, name, index, api_name): self.api_name = 
api_name def define_from_array_api_string(self): - return "#define %s ((%s *)%s[%d])" % (self.name, - self.type, - self.api_name, - self.index) + return f"#define {self.name} (({self.type} *){self.api_name}[{self.index}])" def array_api_define(self): return f" (void *) &{self.name}" diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index 23d678872ca4..b77cc842c927 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -58,7 +58,7 @@ #include "numpy/_public_dtype_api_table.h" #if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int +static inline int _import_array(void) { int st; diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index f5d8530bbc58..ecb455767a24 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -57,7 +57,8 @@ class TypeDescription: If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. cfunc_alias : str or none, optional - Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. See make_arrays. + Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. + See make_arrays. 
NOTE: it doesn't support 'astype' dispatch : str or None, optional Dispatch-able source name without its extension '.dispatch.c' that @@ -202,9 +203,11 @@ class Ufunc: type_descriptions : TypeDescription objects signature: a generalized ufunc signature (like for matmul) indexed: add indexed loops (ufunc.at) for these type characters + no_float_errors: if True, the ufunc never raises floating point errors """ def __init__(self, nin, nout, identity, docstring, typereso, - *type_descriptions, signature=None, indexed=''): + *type_descriptions, signature=None, indexed='', + no_float_errors=False): self.nin = nin self.nout = nout if identity is None: @@ -215,6 +218,7 @@ def __init__(self, nin, nout, identity, docstring, typereso, self.type_descriptions = [] self.signature = signature self.indexed = indexed + self.no_float_errors = no_float_errors for td in type_descriptions: self.type_descriptions.extend(td) for td in self.type_descriptions: @@ -433,6 +437,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(P, f='conjugate'), + no_float_errors=True, ), 'fmod': Ufunc(2, 1, None, @@ -500,6 +505,7 @@ def english_upper(s): TD(cmplx, dispatch=[('loops_unary_complex', 'FD')], out=('f', 'd', 'g')), TD(O, f='PyNumber_Absolute'), + no_float_errors=True, ), '_arg': Ufunc(1, 1, None, @@ -514,6 +520,7 @@ def english_upper(s): TD(ints + flts + timedeltaonly, dispatch=[('loops_unary', ints + 'fdg')]), TD(cmplx, f='neg'), TD(O, f='PyNumber_Negative'), + no_float_errors=True, ), 'positive': Ufunc(1, 1, None, @@ -522,12 +529,16 @@ def english_upper(s): TD(ints + flts + timedeltaonly), TD(cmplx, f='pos'), TD(O, f='PyNumber_Positive'), + no_float_errors=True, ), 'sign': Ufunc(1, 1, None, docstrings.get('numpy._core.umath.sign'), - 'PyUFunc_SimpleUniformOperationTypeResolver', - TD(nobool_or_datetime, dispatch=[('loops_autovec', ints)]), + 'PyUFunc_SignTypeResolver', + TD(ints + flts, dispatch=[('loops_autovec', ints)]), + TD(timedeltaonly, out='d'), + TD(cmplx + O), + 
no_float_errors=True, ), 'greater': Ufunc(2, 1, None, @@ -539,6 +550,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'greater_equal': Ufunc(2, 1, None, @@ -550,6 +562,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'less': Ufunc(2, 1, None, @@ -561,6 +574,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'less_equal': Ufunc(2, 1, None, @@ -572,6 +586,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'equal': Ufunc(2, 1, None, @@ -583,6 +598,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'not_equal': Ufunc(2, 1, None, @@ -594,6 +610,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'logical_and': Ufunc(2, 1, True_, @@ -604,6 +621,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(O, f='npy_ObjectLogicalAnd'), + no_float_errors=True, ), 'logical_not': Ufunc(1, 1, None, @@ -614,6 +632,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(O, f='npy_ObjectLogicalNot'), + no_float_errors=True, ), 'logical_or': Ufunc(2, 1, False_, @@ -624,6 +643,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(O, f='npy_ObjectLogicalOr'), + no_float_errors=True, ), 'logical_xor': 
Ufunc(2, 1, False_, @@ -636,6 +656,7 @@ def english_upper(s): ]), # TODO: using obj.logical_xor() seems pretty much useless: TD(P, f='logical_xor'), + no_float_errors=True, ), 'maximum': Ufunc(2, 1, ReorderableNone, @@ -645,6 +666,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', ints + 'fdg')]), TD(O, f='npy_ObjectMax'), indexed=flts + ints, + no_float_errors=True, ), 'minimum': Ufunc(2, 1, ReorderableNone, @@ -655,6 +677,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', ints + 'fdg')]), TD(O, f='npy_ObjectMin'), indexed=flts + ints, + no_float_errors=True, ), 'clip': Ufunc(3, 1, ReorderableNone, @@ -671,6 +694,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', 'fdg')]), TD(O, f='npy_ObjectMax'), indexed=flts + ints, + no_float_errors=True, ), 'fmin': Ufunc(2, 1, ReorderableNone, @@ -681,6 +705,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', 'fdg')]), TD(O, f='npy_ObjectMin'), indexed=flts + ints, + no_float_errors=True, ), 'logaddexp': Ufunc(2, 1, MinusInfinity, @@ -702,6 +727,7 @@ def english_upper(s): dispatch=[('loops_logical', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_And'), + no_float_errors=True, ), 'bitwise_or': Ufunc(2, 1, Zero, @@ -710,6 +736,7 @@ def english_upper(s): TD('?', cfunc_alias='logical_or', dispatch=[('loops_logical', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Or'), + no_float_errors=True, ), 'bitwise_xor': Ufunc(2, 1, Zero, @@ -719,6 +746,7 @@ def english_upper(s): dispatch=[('loops_comparison', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Xor'), + no_float_errors=True, ), 'invert': Ufunc(1, 1, None, @@ -728,6 +756,7 @@ def english_upper(s): dispatch=[('loops_logical', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Invert'), + no_float_errors=True, ), 'left_shift': Ufunc(2, 1, None, @@ -735,6 +764,7 @@ def english_upper(s): None, TD(ints, 
dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Lshift'), + no_float_errors=True, ), 'right_shift': Ufunc(2, 1, None, @@ -742,6 +772,7 @@ def english_upper(s): None, TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Rshift'), + no_float_errors=True, ), 'heaviside': Ufunc(2, 1, None, @@ -975,6 +1006,7 @@ def english_upper(s): None, TD(flts, f='fabs', astype={'e': 'f'}), TD(P, f='fabs'), + no_float_errors=True, ), 'floor': Ufunc(1, 1, None, @@ -1037,6 +1069,7 @@ def english_upper(s): ('loops_unary_fp_le', inexactvec), ('loops_autovec', bints), ]), + no_float_errors=True, ), 'isnat': Ufunc(1, 1, None, @@ -1052,6 +1085,7 @@ def english_upper(s): ('loops_unary_fp_le', inexactvec), ('loops_autovec', bints + 'mM'), ]), + no_float_errors=True, ), 'isfinite': Ufunc(1, 1, None, @@ -1061,18 +1095,21 @@ def english_upper(s): ('loops_unary_fp_le', inexactvec), ('loops_autovec', bints), ]), + no_float_errors=True, ), 'signbit': Ufunc(1, 1, None, docstrings.get('numpy._core.umath.signbit'), None, TD(flts, out='?', dispatch=[('loops_unary_fp_le', inexactvec)]), + no_float_errors=True, ), 'copysign': Ufunc(2, 1, None, docstrings.get('numpy._core.umath.copysign'), None, TD(flts), + no_float_errors=True, ), 'nextafter': Ufunc(2, 1, None, @@ -1169,6 +1206,18 @@ def english_upper(s): TD(O), signature='(n),(n,m)->(m)', ), +# Real and imag ufunc helpers (loops added later): +'real': + Ufunc(1, 1, None, + docstrings.get('numpy._core.umath.real'), + None, + ), +'imag': + Ufunc(1, 1, None, + docstrings.get('numpy._core.umath.imag'), + None, + ), +# String ufuncs (loops added later): 'str_len': Ufunc(1, 1, Zero, docstrings.get('numpy._core.umath.str_len'), @@ -1433,19 +1482,16 @@ def make_arrays(funcdict): astype = '' if t.astype is not None: astype = f'_As_{thedict[t.astype]}' - astr = ('%s_functions[%d] = PyUFunc_%s%s;' % - (name, k, thedict[t.type], astype)) + astr = f'{name}_functions[{k}] = PyUFunc_{thedict[t.type]}{astype};' code2list.append(astr) if t.type == 'O': 
- astr = ('%s_data[%d] = (void *) %s;' % - (name, k, t.func_data)) + astr = f'{name}_data[{k}] = (void *) {t.func_data};' code2list.append(astr) datalist.append('(void *)NULL') elif t.type == 'P': datalist.append(f'(void *)"{t.func_data}"') else: - astr = ('%s_data[%d] = (void *) %s;' % - (name, k, t.func_data)) + astr = f'{name}_data[{k}] = (void *) {t.func_data};' code2list.append(astr) datalist.append('(void *)NULL') #datalist.append('(void *)%s' % t.func_data) @@ -1468,12 +1514,9 @@ def make_arrays(funcdict): signames = ', '.join(siglist) datanames = ', '.join(datalist) code1list.append( - "static PyUFuncGenericFunction %s_functions[] = {%s};" - % (name, funcnames)) - code1list.append("static void * %s_data[] = {%s};" - % (name, datanames)) - code1list.append("static const char %s_signatures[] = {%s};" - % (name, signames)) + f"static PyUFuncGenericFunction {name}_functions[] = {{{funcnames}}};") + code1list.append(f"static void * {name}_data[] = {{{datanames}}};") + code1list.append(f"static const char {name}_signatures[] = {{{signames}}};") uf.empty = False else: uf.empty = True @@ -1483,10 +1526,15 @@ def make_arrays(funcdict): #include "{dname}.dispatch.h" """)) for (ufunc_name, func_idx, cfunc_name, inout) in funcs: - code2list.append(textwrap.dedent(f"""\ + call_text = ( + f"NPY_CPU_DISPATCH_CALL_XB(" + f"{ufunc_name}_functions[{func_idx}] = {cfunc_name});" + ) + text = f"""\ NPY_CPU_DISPATCH_TRACE("{ufunc_name}", "{''.join(inout)}"); - NPY_CPU_DISPATCH_CALL_XB({ufunc_name}_functions[{func_idx}] = {cfunc_name}); - """)) + {call_text} + """ + code2list.append(textwrap.dedent(text)) return "\n".join(code1list), "\n".join(code2list) def make_ufuncs(funcdict): @@ -1538,11 +1586,10 @@ def make_ufuncs(funcdict): mlist.append(fmt.format(**args)) if uf.typereso is not None: - mlist.append( - r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso) + mlist.append(rf"((PyUFuncObject *)f)->type_resolver = &{uf.typereso};") for c in uf.indexed: # Handle indexed 
loops by getting the underlying ArrayMethodObject - # from the list in f._loops and setting its field appropriately + # from the dict in f._loops and setting its field appropriately fmt = textwrap.dedent(""" {{ PyArray_DTypeMeta *dtype = PyArray_DTypeFromTypeNum({typenum}); @@ -1574,7 +1621,11 @@ def make_ufuncs(funcdict): funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) - mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) + if uf.no_float_errors: + mlist.append( + r"((PyUFuncObject *)f)->_ufunc_flags |=" + r" UFUNC_NO_FLOATINGPOINT_ERRORS;") + mlist.append(rf"""PyDict_SetItemString(dictionary, "{name}", f);""") mlist.append(r"""Py_DECREF(f);""") code3list.append('\n'.join(mlist)) return '\n'.join(code3list) diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index ac108aa20370..c2b471c71757 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -19,7 +19,7 @@ def get_annotations(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import numpy # (numpy is not yet built) genapi_py = os.path.join(os.path.dirname(__file__), 'genapi.py') spec = importlib.util.spec_from_file_location('conv_template', genapi_py) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index e976be723287..6973ba34322e 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -332,7 +332,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arcsinh', """ - Inverse hyperbolic sine element-wise. + Inverse hyperbolic sine, element-wise. Parameters ---------- @@ -534,7 +534,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arctanh', """ - Inverse hyperbolic tangent element-wise. + Inverse hyperbolic tangent, element-wise. 
Parameters ---------- @@ -917,7 +917,7 @@ def add_newdoc(place, name, doc): array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -1145,7 +1145,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1506,7 +1506,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 @@ -1545,7 +1545,7 @@ def add_newdoc(place, name, doc): ------- out : bool or ndarray of bool Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1619,7 +1619,7 @@ def add_newdoc(place, name, doc): returned. In a two's-complement system, this operation effectively flips all the bits, resulting in a representation that corresponds to the negative of the input plus one. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement + representing signed integers on computers [1]_. An N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. 
@@ -1870,7 +1870,7 @@ def add_newdoc(place, name, doc): Examples -------- >>> import numpy as np - >>> np.isnat(np.datetime64("NaT")) + >>> np.isnat(np.datetime64("NaT", "D")) True >>> np.isnat(np.datetime64("2016-01-01")) False @@ -1957,7 +1957,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1994,7 +1994,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3174,7 +3174,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'negative', """ - Numerical negative, element-wise. + Numerical negation, element-wise. Parameters ---------- @@ -3255,7 +3255,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3373,9 +3373,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.power(x3, 1.5, dtype=complex) + >>> np.power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3452,9 +3452,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. 
- >>> np.float_power(x3, 1.5, dtype=complex) + >>> np.float_power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -4055,7 +4055,7 @@ def add_newdoc(place, name, doc): >>> # Discrepancy due to vagaries of floating point arithmetic. >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True @@ -4256,7 +4256,7 @@ def add_newdoc(place, name, doc): >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -4271,7 +4271,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'tanh', """ - Compute hyperbolic tangent element-wise. + Hyperbolic tangent, element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. @@ -4309,7 +4309,7 @@ def add_newdoc(place, name, doc): >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True @@ -4522,6 +4522,62 @@ def add_newdoc(place, name, doc): """) +add_newdoc('numpy._core.umath', 'real', + """ + Returns the real part of the elements in the array. + + Parameters + ---------- + x : array_like + $PARAMS + + Returns + ------- + y : ndarray + Real part of input array. + $OUT_SCALAR_1 + + See Also + -------- + ndarray.real + ndarray.imag + + Notes + ----- + This ufunc is used internally to implement the `ndarray.real` + attribute and the `np.real` function. It should not be used directly. + + """) + +add_newdoc('numpy._core.umath', 'imag', + """ + Returns the imaginary part of the elements in the array. 
+ + Unlike typical ufuncs, the return is typically a view into the original array. + + Parameters + ---------- + x : array_like + $PARAMS + + Returns + ------- + y : ndarray + Complex part of input array or zeros. + $OUT_SCALAR_1 + + See Also + -------- + ndarray.imag + ndarray.real + + Notes + ----- + This ufunc is used internally to implement the `ndarray.imag` + attribute and the `np.imag` function. It should not be used directly. + + """) + add_newdoc('numpy._core.umath', 'str_len', """ Returns the length of each element. For byte strings, diff --git a/numpy/_core/code_generators/verify_c_api_version.py b/numpy/_core/code_generators/verify_c_api_version.py index 955ec595327e..9c39b044e955 100644 --- a/numpy/_core/code_generators/verify_c_api_version.py +++ b/numpy/_core/code_generators/verify_c_api_version.py @@ -45,7 +45,7 @@ def check_api_version(apiversion): f"{apiversion}, with checksum {curapi_hash}, but recorded " f"checksum in _core/codegen_dir/cversions.txt is {api_hash}. " "If functions were added in the C API, you have to update " - f"C_API_VERSION in {__file__}." + f"C_API_VERSION in numpy/core/meson.build." ) raise MismatchCAPIError(msg) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 1a8750507f41..5883bb6be5f5 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -274,7 +274,7 @@ def multiply(a, i): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` or ``str_`` dtype i : array_like, with any integer dtype @@ -409,6 +409,10 @@ class chararray(ndarray): Provides a convenient view on arrays of string and unicode values. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + .. note:: The `chararray` class exists for backwards compatibility with Numarray, it is not recommended for new development. 
Starting from numpy @@ -543,7 +547,7 @@ class adds the following functionality: [b'abc', b'abc', b'abc']], dtype='|S5') """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + def __new__(cls, shape, itemsize=1, unicode=False, buffer=None, offset=0, strides=None, order='C'): if unicode: dtype = str_ @@ -563,10 +567,10 @@ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, filler = None if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), + self = ndarray.__new__(cls, shape, (dtype, itemsize), order=order) else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), + self = ndarray.__new__(cls, shape, (dtype, itemsize), buffer=buffer, offset=offset, strides=strides, order=order) @@ -1215,6 +1219,10 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): """ Create a `~numpy.char.chararray`. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + .. note:: This class is provided for numarray backward-compatibility. New code (not concerned with numarray compatibility) should use @@ -1359,6 +1367,10 @@ def asarray(obj, itemsize=None, unicode=None, order=None): Convert the input to a `~numpy.char.chararray`, copying the data only if necessary. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. 
+ Versus a NumPy array of dtype `bytes_` or `str_`, this class adds the following functionality: diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 406d11ea0eb7..fa21bf218f6c 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,16 +1,9 @@ -from typing import ( - Any, - Literal as L, - Self, - SupportsIndex, - SupportsInt, - TypeAlias, - overload, -) -from typing_extensions import Buffer, TypeVar +from collections.abc import Buffer +from typing import Any, Final, Literal as L, Self, SupportsIndex, SupportsInt, overload +from typing_extensions import TypeVar, deprecated import numpy as np -from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ +from numpy import _OrderKACF, add, bytes_, dtype, int_, ndarray, object_, str_ from numpy._core.multiarray import compare_chararrays from numpy._typing import ( NDArray, @@ -24,6 +17,7 @@ from numpy._typing import ( _Shape, _ShapeLike, _SupportsArray, + _UFunc_Nin1_Nout1, ) __all__ = [ @@ -83,19 +77,24 @@ __all__ = [ ] _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_CharacterT = TypeVar("_CharacterT", bound=np.character) _CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) -_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]] +type _CharArray[ScalarT: np.character] = chararray[_AnyShape, dtype[ScalarT]] # type: ignore[deprecated] + +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = _StringDTypeArray | NDArray[np.str_] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +### +@deprecated( + "The chararray class is deprecated and will be removed in a 
future release. " + "Use an ndarray with a string or bytes dtype instead." +) class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def __new__( - subtype, + cls, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = 1, unicode: L[False] = False, @@ -106,7 +105,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): ) -> _CharArray[bytes_]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt, unicode: L[True], @@ -117,7 +116,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): ) -> _CharArray[str_]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = 1, *, @@ -128,388 +127,203 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): order: _OrderKACF = "C", ) -> _CharArray[str_]: ... + # def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + # @overload # type: ignore[override] - def __eq__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __eq__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __eq__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __eq__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... + # @overload # type: ignore[override] - def __ne__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __ne__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __ne__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __ne__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... 
+ # @overload # type: ignore[override] - def __ge__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __ge__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __ge__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __ge__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __le__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __le__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __le__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __le__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __gt__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __gt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __gt__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __gt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __lt__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __lt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __lt__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __lt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __add__( - self: _CharArray[str_], - other: U_co, - ) -> _CharArray[str_]: ... 
+ def __add__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload - def __add__( - self: _CharArray[bytes_], - other: S_co, - ) -> _CharArray[bytes_]: ... + def __add__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __radd__( - self: _CharArray[str_], - other: U_co, - ) -> _CharArray[str_]: ... + def __radd__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload - def __radd__( - self: _CharArray[bytes_], - other: S_co, - ) -> _CharArray[bytes_]: ... + def __radd__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + # + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + + # + def decode(self: _CharArray[bytes_], encoding: str | None = None, errors: str | None = None) -> _CharArray[str_]: ... + def encode(self: _CharArray[str_], encoding: str | None = None, errors: str | None = None) -> _CharArray[bytes_]: ... + + # @overload - def center( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = " ", - ) -> _CharArray[str_]: ... + def center(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def center( - self: _CharArray[bytes_], - width: i_co, - fillchar: str | S_co = " ", - ) -> _CharArray[bytes_]: ... + def center(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def count( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... 
+ def count(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def count( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... - - def decode( - self: _CharArray[bytes_], - encoding: str | None = None, - errors: str | None = None, - ) -> _CharArray[str_]: ... - - def encode( - self: _CharArray[str_], - encoding: str | None = None, - errors: str | None = None, - ) -> _CharArray[bytes_]: ... + def count(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def endswith( - self: _CharArray[str_], - suffix: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... + def endswith(self: _CharArray[str_], suffix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... @overload - def endswith( - self: _CharArray[bytes_], - suffix: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... - - def expandtabs( - self, - tabsize: i_co = 8, - ) -> Self: ... + def endswith(self: _CharArray[bytes_], suffix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + + # + def expandtabs(self, tabsize: i_co = 8) -> Self: ... + # @overload - def find( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def find(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def find( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def find(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def index( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... 
+ def index(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def index( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def index(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def join( - self: _CharArray[str_], - seq: U_co, - ) -> _CharArray[str_]: ... + def join(self: _CharArray[str_], seq: U_co) -> _CharArray[str_]: ... @overload - def join( - self: _CharArray[bytes_], - seq: S_co, - ) -> _CharArray[bytes_]: ... + def join(self: _CharArray[bytes_], seq: S_co) -> _CharArray[bytes_]: ... + # @overload - def ljust( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = " ", - ) -> _CharArray[str_]: ... + def ljust(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def ljust( - self: _CharArray[bytes_], - width: i_co, - fillchar: str | S_co = " ", - ) -> _CharArray[bytes_]: ... + def ljust(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def lstrip( - self: _CharArray[str_], - chars: U_co | None = None, - ) -> _CharArray[str_]: ... + def lstrip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def lstrip( - self: _CharArray[bytes_], - chars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def lstrip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # @overload # type: ignore[override] - def partition( - self: _CharArray[str_], - sep: U_co, - ) -> _CharArray[str_]: ... + def partition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload - def partition( - self: _CharArray[bytes_], - sep: S_co, - ) -> _CharArray[bytes_]: ... + def partition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + # @overload - def replace( - self: _CharArray[str_], - old: U_co, - new: U_co, - count: i_co | None = None, - ) -> _CharArray[str_]: ... + def replace(self: _CharArray[str_], old: U_co, new: U_co, count: i_co | None = None) -> _CharArray[str_]: ... @overload - def replace( - self: _CharArray[bytes_], - old: S_co, - new: S_co, - count: i_co | None = None, - ) -> _CharArray[bytes_]: ... + def replace(self: _CharArray[bytes_], old: S_co, new: S_co, count: i_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def rfind( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rfind(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def rfind( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rfind(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def rindex( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rindex(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def rindex( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rindex(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def rjust( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = " ", - ) -> _CharArray[str_]: ... + def rjust(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def rjust( - self: _CharArray[bytes_], - width: i_co, - fillchar: str | S_co = " ", - ) -> _CharArray[bytes_]: ... 
+ def rjust(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def rpartition( - self: _CharArray[str_], - sep: U_co, - ) -> _CharArray[str_]: ... + def rpartition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... @overload - def rpartition( - self: _CharArray[bytes_], - sep: S_co, - ) -> _CharArray[bytes_]: ... + def rpartition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... + # @overload - def rsplit( - self: _CharArray[str_], - sep: U_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def rsplit(self: _CharArray[str_], sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload - def rsplit( - self: _CharArray[bytes_], - sep: S_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def rsplit(self: _CharArray[bytes_], sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... + # @overload - def rstrip( - self: _CharArray[str_], - chars: U_co | None = None, - ) -> _CharArray[str_]: ... + def rstrip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def rstrip( - self: _CharArray[bytes_], - chars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def rstrip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def split( - self: _CharArray[str_], - sep: U_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def split(self: _CharArray[str_], sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload - def split( - self: _CharArray[bytes_], - sep: S_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def split(self: _CharArray[bytes_], sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... + # def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... 
+ # @overload - def startswith( - self: _CharArray[str_], - prefix: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... + def startswith(self: _CharArray[str_], prefix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... @overload - def startswith( - self: _CharArray[bytes_], - prefix: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... + def startswith(self: _CharArray[bytes_], prefix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + # @overload - def strip( - self: _CharArray[str_], - chars: U_co | None = None, - ) -> _CharArray[str_]: ... + def strip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def strip( - self: _CharArray[bytes_], - chars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def strip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def translate( - self: _CharArray[str_], - table: U_co, - deletechars: U_co | None = None, - ) -> _CharArray[str_]: ... + def translate(self: _CharArray[str_], table: U_co, deletechars: U_co | None = None) -> _CharArray[str_]: ... @overload - def translate( - self: _CharArray[bytes_], - table: S_co, - deletechars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def translate(self: _CharArray[bytes_], table: S_co, deletechars: S_co | None = None) -> _CharArray[bytes_]: ... + # def zfill(self, width: i_co) -> Self: ... def capitalize(self) -> Self: ... def title(self) -> Self: ... def swapcase(self) -> Self: ... def lower(self) -> Self: ... def upper(self) -> Self: ... + + # def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... @@ -563,15 +377,6 @@ def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... @overload def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... 
-@overload -def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... -@overload -def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... -@overload -def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... -@overload -def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... - @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @overload @@ -582,13 +387,13 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload -def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +def mod(a: U_co, values: Any) -> NDArray[np.str_]: ... @overload -def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +def mod(a: S_co, values: Any) -> NDArray[np.bytes_]: ... @overload -def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +def mod(a: _StringDTypeSupportsArray, values: Any) -> _StringDTypeArray: ... @overload -def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... +def mod(a: T_co, values: Any) -> _StringDTypeOrUnicodeArray: ... @overload def capitalize(a: U_co) -> NDArray[str_]: ... @@ -608,16 +413,8 @@ def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTy @overload def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... -def decode( - a: S_co, - encoding: str | None = None, - errors: str | None = None, -) -> NDArray[str_]: ... -def encode( - a: U_co | T_co, - encoding: str | None = None, - errors: str | None = None, -) -> NDArray[bytes_]: ... +def decode(a: S_co, encoding: str | None = None, errors: str | None = None) -> NDArray[str_]: ... +def encode(a: U_co | T_co, encoding: str | None = None, errors: str | None = None) -> NDArray[bytes_]: ... @overload def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... 
@@ -674,58 +471,24 @@ def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _ def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def replace( - a: U_co, - old: U_co, - new: U_co, - count: i_co | None = -1, -) -> NDArray[str_]: ... +def replace(a: U_co, old: U_co, new: U_co, count: i_co | None = -1) -> NDArray[str_]: ... @overload -def replace( - a: S_co, - old: S_co, - new: S_co, - count: i_co | None = -1, -) -> NDArray[bytes_]: ... +def replace(a: S_co, old: S_co, new: S_co, count: i_co | None = -1) -> NDArray[bytes_]: ... @overload def replace( - a: _StringDTypeSupportsArray, - old: _StringDTypeSupportsArray, - new: _StringDTypeSupportsArray, - count: i_co = -1, + a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, count: i_co = -1 ) -> _StringDTypeArray: ... @overload -def replace( - a: T_co, - old: T_co, - new: T_co, - count: i_co = -1, -) -> _StringDTypeOrUnicodeArray: ... - -@overload -def rjust( - a: U_co, - width: i_co, - fillchar: U_co = " ", -) -> NDArray[str_]: ... -@overload -def rjust( - a: S_co, - width: i_co, - fillchar: str | S_co = " ", -) -> NDArray[bytes_]: ... -@overload -def rjust( - a: _StringDTypeSupportsArray, - width: i_co, - fillchar: str | _StringDTypeSupportsArray = " ", -) -> _StringDTypeArray: ... +def replace(a: T_co, old: T_co, new: T_co, count: i_co = -1) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def rjust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def rjust( - a: T_co, - width: i_co, - fillchar: T_co = " ", -) -> _StringDTypeOrUnicodeArray: ... +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... +@overload +def rjust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... 
@overload def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... @@ -737,29 +500,15 @@ def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def rsplit( - a: U_co, - sep: U_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def rsplit(a: U_co, sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload -def rsplit( - a: S_co, - sep: S_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def rsplit(a: S_co, sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload def rsplit( - a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = None, - maxsplit: i_co | None = None, + a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray | None = None, maxsplit: i_co | None = None ) -> NDArray[object_]: ... @overload -def rsplit( - a: T_co, - sep: T_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def rsplit(a: T_co, sep: T_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @@ -771,29 +520,15 @@ def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def split( - a: U_co, - sep: U_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def split(a: U_co, sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload -def split( - a: S_co, - sep: S_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def split(a: S_co, sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... 
@overload def split( - a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = None, - maxsplit: i_co | None = None, + a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray | None = None, maxsplit: i_co | None = None ) -> NDArray[object_]: ... @overload -def split( - a: T_co, - sep: T_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def split(a: T_co, sep: T_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... @@ -825,29 +560,13 @@ def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def translate( - a: U_co, - table: str, - deletechars: str | None = None, -) -> NDArray[str_]: ... +def translate(a: U_co, table: str, deletechars: str | None = None) -> NDArray[str_]: ... @overload -def translate( - a: S_co, - table: str, - deletechars: str | None = None, -) -> NDArray[bytes_]: ... +def translate(a: S_co, table: str, deletechars: str | None = None) -> NDArray[bytes_]: ... @overload -def translate( - a: _StringDTypeSupportsArray, - table: str, - deletechars: str | None = None, -) -> _StringDTypeArray: ... +def translate(a: _StringDTypeSupportsArray, table: str, deletechars: str | None = None) -> _StringDTypeArray: ... @overload -def translate( - a: T_co, - table: str, - deletechars: str | None = None, -) -> _StringDTypeOrUnicodeArray: ... +def translate(a: T_co, table: str, deletechars: str | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def upper(a: U_co) -> NDArray[str_]: ... @@ -869,176 +588,72 @@ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... # String information @overload -def count( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def count( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... 
-@overload -def count( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def endswith( - a: U_co, - suffix: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def endswith( - a: S_co, - suffix: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def endswith( - a: T_co, - suffix: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... - -@overload -def find( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def find( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def find( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def index( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def index( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def index( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -def isalpha(a: UST_co) -> NDArray[np.bool]: ... -def isalnum(a: UST_co) -> NDArray[np.bool]: ... -def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ... -def isdigit(a: UST_co) -> NDArray[np.bool]: ... -def islower(a: UST_co) -> NDArray[np.bool]: ... -def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ... -def isspace(a: UST_co) -> NDArray[np.bool]: ... -def istitle(a: UST_co) -> NDArray[np.bool]: ... -def isupper(a: UST_co) -> NDArray[np.bool]: ... - -@overload -def rfind( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rfind( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... 
-@overload -def rfind( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def rindex( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rindex( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rindex( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def startswith( - a: U_co, - prefix: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def startswith( - a: S_co, - prefix: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def startswith( - a: T_co, - prefix: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... - -def str_len(A: UST_co) -> NDArray[int_]: ... +def count(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def count(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def count(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def endswith(a: U_co, suffix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def endswith(a: S_co, suffix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def endswith(a: T_co, suffix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + +@overload +def find(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def find(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def find(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... 
+ +@overload +def index(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def index(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def index(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +isalpha: Final[_UFunc_Nin1_Nout1[L["isalpha"], L[0], L[False]]] = ... +isalnum: Final[_UFunc_Nin1_Nout1[L["isalnum"], L[0], L[False]]] = ... +isdecimal: Final[_UFunc_Nin1_Nout1[L["isdecimal"], L[0], L[False]]] = ... +isdigit: Final[_UFunc_Nin1_Nout1[L["isdigit"], L[0], L[False]]] = ... +islower: Final[_UFunc_Nin1_Nout1[L["islower"], L[0], L[False]]] = ... +isnumeric: Final[_UFunc_Nin1_Nout1[L["isnumeric"], L[0], L[False]]] = ... +isspace: Final[_UFunc_Nin1_Nout1[L["isspace"], L[0], L[False]]] = ... +istitle: Final[_UFunc_Nin1_Nout1[L["istitle"], L[0], L[False]]] = ... +isupper: Final[_UFunc_Nin1_Nout1[L["isupper"], L[0], L[False]]] = ... + +str_len: Final[_UFunc_Nin1_Nout1[L["str_len"], L[0], L[0]]] = ... + +@overload +def rfind(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rfind(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rfind(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def rindex(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rindex(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rindex(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def startswith(a: U_co, prefix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def startswith(a: S_co, prefix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... 
+@overload +def startswith(a: T_co, prefix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... # Overload 1 and 2: str- or bytes-based array-likes # overload 3 and 4: arbitrary object with unicode=False (-> bytes_) # overload 5 and 6: arbitrary object with unicode=True (-> str_) # overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: U_co, itemsize: int | None = None, @@ -1047,6 +662,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: S_co, itemsize: int | None = None, @@ -1055,6 +671,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None, @@ -1063,6 +680,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None = None, @@ -1072,6 +690,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None, @@ -1080,6 +699,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None = None, @@ -1089,6 +709,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[str_]: ... 
@overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None = None, @@ -1098,6 +719,7 @@ def array( ) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: U_co, itemsize: int | None = None, @@ -1105,6 +727,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: S_co, itemsize: int | None = None, @@ -1112,6 +735,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None, @@ -1119,6 +743,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None = None, @@ -1127,6 +752,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None, @@ -1134,6 +760,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None = None, @@ -1142,6 +769,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[str_]: ... 
@overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None = None, diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 9461994f5795..7b19808a94f2 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -401,7 +401,7 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): if result is not None: known_contractions.append(result) - # If we do not have a inner contraction, rescan pairs + # If we do not have an inner contraction, rescan pairs # including outer products if len(known_contractions) == 0: @@ -608,8 +608,8 @@ def _parse_einsum_input(operands): # Make sure output subscripts are in the input for char in output_subscript: if output_subscript.count(char) != 1: - raise ValueError("Output character %s appeared more than once in " - "the output." % char) + raise ValueError(f"Output character {char} appeared more than once in " + "the output.") if char not in input_subscripts: raise ValueError(f"Output character {char} did not appear in the input") @@ -790,9 +790,9 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): - raise ValueError("Einstein sum subscript %s does not contain the " - "correct number of indices for operand %d." - % (input_subscripts[tnum], tnum)) + raise ValueError(f"Einstein sum subscript {input_subscripts[tnum]} " + "does not contain the " + f"correct number of indices for operand {tnum}.") for cnum, char in enumerate(term): dim = sh[cnum] @@ -801,9 +801,9 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): if dimension_dict[char] == 1: dimension_dict[char] = dim elif dim not in (1, dimension_dict[char]): - raise ValueError("Size of label '%s' for operand %d (%d) " - "does not match previous terms (%d)." 
- % (char, tnum, dimension_dict[char], dim)) + raise ValueError(f"Size of label {char!r} for " + f"operand {tnum} ({dimension_dict[char]}) " + f"does not match previous terms ({dim}).") else: dimension_dict[char] = dim @@ -886,7 +886,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Return the path along with a nice string representation overall_contraction = input_subscripts + "->" + output_subscript - header = ("scaling", "current", "remaining") # Compute naive cost # This isn't quite right, need to look into exactly how einsum does this @@ -903,20 +902,19 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): path_print = f" Complete contraction: {overall_contraction}\n" path_print += f" Naive scaling: {num_indices}\n" - path_print += " Optimized scaling: %d\n" % max(scale_list) + path_print += f" Optimized scaling: {max(scale_list)}\n" path_print += f" Naive FLOP count: {naive_cost:.3e}\n" path_print += f" Optimized FLOP count: {opt_cost:.3e}\n" path_print += f" Theoretical speedup: {speedup:3.3f}\n" path_print += f" Largest intermediate: {max_i:.3e} elements\n" path_print += "-" * 74 + "\n" - path_print += "%6s %24s %40s\n" % header + path_print += f"{'scaling':>6} {'current':>24} {'remaining':>40}\n" path_print += "-" * 74 for n, contraction in enumerate(contraction_list): _, einsum_str, remaining = contraction remaining_str = ",".join(remaining) + "->" + output_subscript - path_run = (scale_list[n], einsum_str, remaining_str) - path_print += "\n%4d %24s %40s" % path_run + path_print += f"\n{scale_list[n]:4d} {einsum_str:>24} {remaining_str:>40}" path = ['einsum_path'] + path return (path, path_print) @@ -970,7 +968,7 @@ def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): @functools.lru_cache(2**12) def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): """Cached parsing of a two term einsum equation into the necessary - sequence of arguments for contracttion via batched matrix multiplication. 
+ sequence of arguments for contraction via batched matrix multiplication. The steps we need to specify are: 1. Remove repeated and trivial indices from the left and right terms, @@ -1188,14 +1186,14 @@ def bmm_einsum(eq, a, b, out=None, **kwargs): # prepare left if eq_a is not None: - # diagonals, sums, and tranpose + # diagonals, sums, and transpose a = c_einsum(eq_a, a) if new_shape_a is not None: a = reshape(a, new_shape_a) # prepare right if eq_b is not None: - # diagonals, sums, and tranpose + # diagonals, sums, and transpose b = c_einsum(eq_b, b) if new_shape_b is not None: b = reshape(b, new_shape_b) @@ -1612,8 +1610,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): # Check the kwargs to avoid a more cryptic error later, without having to # repeat default values here valid_einsum_kwargs = ['dtype', 'order', 'casting'] - unknown_kwargs = [k for (k, v) in kwargs.items() if - k not in valid_einsum_kwargs] + unknown_kwargs = [k for k in kwargs if k not in valid_einsum_kwargs] if len(unknown_kwargs): raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 6d34883e6625..3e42ef6dc238 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,8 +1,8 @@ from collections.abc import Sequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np -from numpy import _OrderKACF, number +from numpy import _OrderKACF from numpy._typing import ( NDArray, _ArrayLikeBool_co, @@ -22,14 +22,9 @@ from numpy._typing import ( __all__ = ["einsum", "einsum_path"] -_ArrayT = TypeVar( - "_ArrayT", - bound=NDArray[np.bool | number], -) - -_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None -_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] -_CastingUnsafe: TypeAlias = Literal["unsafe"] +type _OptimizeKind = bool | Literal["greedy", 
"optimal"] | Sequence[Any] | None +type _CastingSafe = Literal["no", "equiv", "safe", "same_kind"] +type _CastingUnsafe = Literal["unsafe"] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order @@ -104,27 +99,27 @@ def einsum( optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload def einsum( @@ -149,27 +144,27 @@ def einsum( optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... # NOTE: `einsum_call` is a hidden kwarg unavailable for public use. # It is therefore excluded from the signatures below. 
diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 33fb9ec4b39f..e5f4ec0e77f5 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -1188,7 +1188,7 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): array([[0, 3], [2, 2]]) - Indices of the sorted elements of a N-dimensional array: + Indices of the sorted elements of an N-dimensional array: >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) >>> ind @@ -1275,7 +1275,7 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): >>> np.argmax(a, axis=1) array([2, 2]) - Indexes of the maximal elements of a N-dimensional array: + Indexes of the maximal elements of an N-dimensional array: >>> a.flat[np.argmax(a)] 15 @@ -1375,7 +1375,7 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): >>> np.argmin(a, axis=1) array([0, 0]) - Indices of the minimum elements of a N-dimensional array: + Indices of the minimum elements of an N-dimensional array: >>> a.flat[np.argmin(a)] 10 @@ -2026,7 +2026,7 @@ def nonzero(a): Notes ----- While the nonzero values can be obtained with ``a[nonzero(a)]``, it is - recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + recommended to use ``x[x.astype(np.bool)]`` or ``x[x != 0]`` instead, which will correctly handle 0-d arrays. Examples @@ -2385,7 +2385,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, more precise approach to summation. Especially when summing a large number of lower precision floating point numbers, such as ``float32``, numerical errors can become significant. - In such cases it can be advisable to use `dtype="float64"` to use a higher + In such cases it can be advisable to use `dtype=np.float64` to use a higher precision for the output. Examples @@ -2721,7 +2721,7 @@ def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, ... 
# total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([1, 2, 3, 4, 5, 6]) - >>> np.cumulative_prod(a, dtype=float) # specify type of output + >>> np.cumulative_prod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of ``b``: @@ -2808,7 +2808,7 @@ def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, array([1, 2, 3, 4, 5, 6]) >>> np.cumulative_sum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + >>> np.cumulative_sum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> b = np.array([[1, 2, 3], [4, 5, 6]]) @@ -2892,7 +2892,7 @@ def cumsum(a, axis=None, dtype=None, out=None): [4, 5, 6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + >>> np.cumsum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns @@ -3096,7 +3096,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, array([1, 3]) >>> np.max(a, where=[False, True], initial=-1, axis=0) array([-1, 3]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.max(b) np.float64(nan) @@ -3235,7 +3235,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, >>> np.min(a, where=[False, True], initial=10, axis=0) array([10, 1]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.min(b) np.float64(nan) @@ -3456,7 +3456,7 @@ def cumprod(a, axis=None, dtype=None, out=None): ... 
# total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output + >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2a9762240e3d..dbff4fcc8283 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,5 +1,4 @@ -# ruff: noqa: ANN401 -from _typeshed import Incomplete +from _typeshed import SupportsBool from collections.abc import Sequence from typing import ( Any, @@ -7,9 +6,7 @@ from typing import ( Never, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -17,7 +14,6 @@ from typing import ( import numpy as np from numpy import ( - _AnyShapeT, _CastingKind, _ModeKind, _OrderACF, @@ -25,16 +21,9 @@ from numpy import ( _PartitionKind, _SortKind, _SortSide, - complexfloating, float16, - floating, - generic, - int64, - int_, intp, object_, - timedelta64, - uint64, ) from numpy._globals import _NoValueType from numpy._typing import ( @@ -49,16 +38,19 @@ from numpy._typing import ( _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeObject_co, - _ArrayLikeUInt_co, _BoolLike_co, _ComplexLike_co, _DTypeLike, + _FloatLike_co, _IntLike_co, _NestedSequence, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, + _SupportsArray, ) +from numpy._typing._array_like import _DualArrayLike __all__ = [ "all", @@ -107,18 +99,11 @@ __all__ = [ "var", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) - 
@type_check_only -class _SupportsShape(Protocol[_ShapeT_co]): +class _SupportsShape[ShapeT_co: _Shape](Protocol): # NOTE: it matters that `self` is positional only @property - def shape(self, /) -> _ShapeT_co: ... + def shape(self, /) -> ShapeT_co: ... @type_check_only class _UFuncKwargs(TypedDict, total=False): @@ -129,20 +114,50 @@ class _UFuncKwargs(TypedDict, total=False): casting: _CastingKind # a "sequence" that isn't a string, bytes, bytearray, or memoryview -_T = TypeVar("_T") -_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +type _PyArray[_T] = list[_T] | tuple[_T, ...] # `int` also covers `bool` -_PyScalar: TypeAlias = complex | bytes | str +type _PyScalar = complex | bytes | str + +type _0D = tuple[()] +type _1D = tuple[int] +type _2D = tuple[int, int] +type _3D = tuple[int, int, int] +type _4D = tuple[int, int, int, int] + +type _Array1D[ScalarT: np.generic] = np.ndarray[_1D, np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[_2D, np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[_3D, np.dtype[ScalarT]] +# workaround for mypy's and pyright's typing spec non-compliance regarding overloads +type _ArrayJustND[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] +type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Sequence[ScalarT]]] + +type _ArrayLikeMultiplicative_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_], complex] +type _ArrayLikeNumeric_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_ | np.timedelta64], complex] + +@type_check_only +class _CanLE(Protocol): + def __le__(self, other: Any, /) -> SupportsBool: ... + +@type_check_only +class _CanGE(Protocol): + def __ge__(self, other: Any, /) -> SupportsBool: ... 
+ +type _Orderable = _CanLE | _CanGE + +### # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, mode: _ModeKind = "raise", -) -> _ScalarT: ... +) -> ScalarT: ... @overload def take( a: ArrayLike, @@ -152,13 +167,13 @@ def take( mode: _ModeKind = "raise", ) -> Any: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def take( a: ArrayLike, @@ -168,50 +183,51 @@ def take( mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... -@overload -def reshape( # shape: index - a: _ArrayLike[_ScalarT], +# keep in sync with `ma.core.reshape` +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: SupportsIndex, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT - a: _ArrayLike[_ScalarT], +) -> _Array1D[ScalarT]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... 
+) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... @overload # shape: Sequence[index] -def reshape( - a: _ArrayLike[_ScalarT], +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: Sequence[SupportsIndex], order: _OrderACF = "C", *, copy: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # shape: index def reshape( a: ArrayLike, @@ -220,16 +236,16 @@ def reshape( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT +) -> np.ndarray[_1D]: ... +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape]( a: ArrayLike, /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype]: ... +) -> np.ndarray[ShapeT]: ... @overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -240,6 +256,7 @@ def reshape( copy: bool | None = None, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.choose` @overload def choose( a: _IntLike_co, @@ -248,12 +265,12 @@ def choose( mode: _ModeKind = "raise", ) -> Any: ... @overload -def choose( +def choose[ScalarT: np.generic]( a: _ArrayLikeInt_co, - choices: _ArrayLike[_ScalarT], + choices: _ArrayLike[ScalarT], out: None = None, mode: _ModeKind = "raise", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, @@ -262,32 +279,32 @@ def choose( mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def choose( +def choose[ArrayT: np.ndarray]( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `ma.core.repeat` @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +) -> _Array1D[ScalarT]: ... 
@overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +) -> _Array1D[Any]: ... @overload def repeat( a: ArrayLike, @@ -295,7 +312,7 @@ def repeat( axis: SupportsIndex, ) -> NDArray[Any]: ... -# +# keep in sync with `ma.core.put` def put( a: NDArray[Any], ind: _ArrayLikeInt_co, @@ -305,157 +322,287 @@ def put( # keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... +def swapaxes[ArrayT: np.ndarray](a: ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> ArrayT: ... @overload -def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... +# @overload -def transpose( - a: _ArrayLike[_ScalarT], - axes: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... +def transpose[ArrayT: np.ndarray](a: ArrayT, axes: _ShapeLike | None = None) -> ArrayT: ... @overload -def transpose( - a: ArrayLike, - axes: _ShapeLike | None = None, -) -> NDArray[Any]: ... +def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +@overload +def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def matrix_transpose[ArrayT: np.ndarray](x: ArrayT, /) -> ArrayT: ... 
+@overload +def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... # -@overload -def partition( - a: _ArrayLike[_ScalarT], +@overload # Nd +def partition[ArrayT: np.ndarray]( + a: ArrayT, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", - order: None = None, -) -> NDArray[_ScalarT]: ... -@overload -def partition( - a: _ArrayLike[np.void], + order: str | Sequence[str] | None = None, +) -> ArrayT: ... +@overload # ?d +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, -) -> NDArray[np.void]: ... -@overload +) -> NDArray[ScalarT]: ... +@overload # axis: None +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + kth: _ArrayLikeInt, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> _Array1D[ScalarT]: ... +@overload # fallback def partition( a: ArrayLike, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, ) -> NDArray[Any]: ... +@overload # fallback, axis: None +def partition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> _Array1D[Any]: ... -# +# keep roughly in sync with `ndarray.argpartition` +@overload # axis: None +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.intp]]: ... 
+@overload # known shape, axis: index (default) +def argpartition[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload # 1d array-like, axis: index (default) +def argpartition( + a: Sequence[np.generic | complex], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.intp]]: ... +@overload # 2d array-like, axis: index (default) +def argpartition( + a: Sequence[Sequence[np.generic | complex]], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.intp]]: ... +@overload # ?d array-like, axis: index (default) def argpartition( a: ArrayLike, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[np.intp]: ... +@overload # void, axis: None +def argpartition( + a: _SupportsArray[np.dtype[np.void]], + kth: _ArrayLikeInt, + axis: None, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, -) -> NDArray[intp]: ... +) -> np.ndarray[tuple[int], np.dtype[intp]]: ... +@overload # void, axis: index (default) +def argpartition[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.void]], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... # @overload -def sort( - a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = -1, +def sort[ArrayT: np.ndarray]( + a: ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> ArrayT: ... 
+@overload +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> NDArray[ScalarT]: ... +@overload +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload def sort( a: ArrayLike, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, ) -> NDArray[Any]: ... +@overload +def sort( + a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> _Array1D[Any]: ... +# +@overload +def argsort[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload def argsort( a: ArrayLike, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, -) -> NDArray[intp]: ... - +) -> NDArray[np.intp]: ... @overload -def argmax( +def argsort( a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... + +# keep in sync with `argmin` below +@overload # ?d +def argmax( + a: ArrayLike | _NestedSequence[_Orderable], axis: None = None, out: None = None, *, keepdims: Literal[False] | _NoValueType = ..., -) -> intp: ... -@overload +) -> np.intp: ... 
+@overload # ?d, axis: def argmax( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.intp]: ... +@overload # Nd, keepdims=True +def argmax[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> Any: ... -@overload + keepdims: Literal[True], +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload # ?d, keepdims=True def argmax( - a: ArrayLike, - axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... -@overload -def argmax( - a: ArrayLike, + keepdims: Literal[True], +) -> NDArray[np.intp]: ... +@overload # ?d, out: ArrayT +def argmax[ArrayT: NDArray[np.intp]]( + a: ArrayLike | _NestedSequence[_Orderable], axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> ArrayT: ... -@overload +# keep in sync with `argmax` above +@overload # ?d def argmin( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], axis: None = None, out: None = None, *, keepdims: Literal[False] | _NoValueType = ..., -) -> intp: ... -@overload +) -> np.intp: ... +@overload # ?d, axis: def argmin( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.intp]: ... +@overload # Nd, keepdims=True +def argmin[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> Any: ... -@overload + keepdims: Literal[True], +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... 
+@overload # ?d, keepdims=True def argmin( - a: ArrayLike, - axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... -@overload -def argmin( - a: ArrayLike, + keepdims: Literal[True], +) -> NDArray[np.intp]: ... +@overload # ?d, out: ArrayT +def argmin[ArrayT: NDArray[np.intp]]( + a: ArrayLike | _NestedSequence[_Orderable], axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -473,51 +620,66 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +# keep in sync with `ma.core.resize` @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic, AnyShapeT: (_0D, _1D, _2D, _3D, _4D)]( + a: _ArrayLike[ScalarT], + new_shape: AnyShapeT, +) -> np.ndarray[AnyShapeT, np.dtype[ScalarT]]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[_1D]: ... 
@overload -def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +def resize[AnyShapeT: (_0D, _1D, _2D, _3D, _4D)](a: ArrayLike, new_shape: AnyShapeT) -> np.ndarray[AnyShapeT]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def squeeze( - a: _ScalarT, - axis: _ShapeLike | None = None, -) -> _ScalarT: ... +def squeeze[ScalarT: np.generic](a: ScalarT, axis: _ShapeLike | None = None) -> ScalarT: ... @overload -def squeeze( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... +def squeeze[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def squeeze( - a: ArrayLike, - axis: _ShapeLike | None = None, -) -> NDArray[Any]: ... +def squeeze(a: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # keep in sync with `ma.core.diagonal` -@overload -def diagonal( - a: _ArrayLike[_ScalarT], +@overload # ?d (workaround) +def diagonal[ScalarT: np.generic]( + a: _ArrayJustND[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, # >= 2D array -) -> NDArray[_ScalarT]: ... -@overload + axis2: SupportsIndex = 1, +) -> NDArray[ScalarT]: ... +@overload # 2d +def diagonal[ScalarT: np.generic]( + a: _ToArray2D[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> _Array1D[ScalarT]: ... +@overload # 3d +def diagonal[ScalarT: np.generic]( + a: _ToArray3D[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> _Array2D[ScalarT]: ... +@overload # Nd +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[ScalarT]: ... 
+@overload # fallback def diagonal( a: ArrayLike, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, # >= 2D array + axis2: SupportsIndex = 1, ) -> NDArray[Any]: ... # keep in sync with `ma.core.trace` @@ -531,29 +693,27 @@ def trace( out: None = None, ) -> Any: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... - -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + out: ArrayT, +) -> ArrayT: ... @overload -def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Array1D[ScalarT]: ... @overload def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... @overload @@ -567,36 +727,47 @@ def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1 @overload def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... @overload -def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[_1D]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... +# keep in sync with the 1-arg overloads of `_core.multiarray.where` +@overload # ?d (workaround) +def nonzero(a: _ArrayJustND[Any]) -> tuple[_Array1D[np.intp], ...]: ... +@overload # 1d +def nonzero(a: _ToArray1D[Any]) -> tuple[_Array1D[np.intp]]: ... 
+@overload # 2d +def nonzero(a: _ToArray2D[Any]) -> tuple[_Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # 3d +def nonzero(a: _ToArray3D[Any]) -> tuple[_Array1D[np.intp], _Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # Nd (fallback) +def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ... -# this prevents `Any` from being returned with Pyright -@overload +# `collections.abc.Sequence` can't be used here because `bytes` and `str` are +# subtypes of it, which would make the return types incompatible. +@overload # this prevents `Any` from being returned with Pyright def shape(a: _SupportsShape[Never]) -> _AnyShape: ... @overload -def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ... +def shape[ShapeT: _Shape](a: _SupportsShape[ShapeT]) -> ShapeT: ... @overload def shape(a: _PyScalar) -> tuple[()]: ... -# `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are -# subtypes of it, which would make the return types incompatible. +@overload # an unbound type variable is used because `list` is invariant +def shape[ScalarT: _PyScalar](a: _PyArray[ScalarT]) -> _1D: ... @overload -def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ... +def shape[ScalarT: _PyScalar](a: Sequence[_PyArray[ScalarT]]) -> _2D: ... @overload -def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... -# this overload will be skipped by typecheckers that don't support PEP 688 -@overload -def shape(a: memoryview | bytearray) -> tuple[int]: ... +def shape[ScalarT: _PyScalar](a: Sequence[Sequence[_PyArray[ScalarT]]]) -> _3D: ... +@overload # this will be skipped by typecheckers that don't support PEP 688 +def shape(a: memoryview | bytearray) -> _1D: ... @overload def shape(a: ArrayLike) -> _AnyShape: ... +# @overload -def compress( +def compress[ScalarT: np.generic]( condition: _ArrayLikeBool_co, # 1D bool array - a: _ArrayLike[_ScalarT], + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = None, out: None = None, -) -> NDArray[_ScalarT]: ... 
+) -> NDArray[ScalarT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array @@ -605,25 +776,25 @@ def compress( out: None = None, ) -> NDArray[Any]: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def clip( - a: _ScalarT, +def clip[ScalarOrArrayT: np.generic | np.ndarray]( + a: ScalarOrArrayT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -632,7 +803,7 @@ def clip( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ScalarT: ... +) -> ScalarOrArrayT: ... @overload def clip( a: _ScalarLike_co, @@ -646,8 +817,8 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload -def clip( - a: _ArrayLike[_ScalarT], +def clip[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -656,7 +827,7 @@ def clip( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def clip( a: ArrayLike, @@ -670,29 +841,29 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... 
@overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _ArrayT, + out: ArrayT, *, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., *, - out: _ArrayT, + out: ArrayT, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ArrayT: ... +) -> ArrayT: ... @overload def clip( a: ArrayLike, @@ -706,678 +877,1064 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... +# keep in sync with `any` @overload -def sum( - a: _ArrayLike[_ScalarT], +def all( + a: ArrayLike | None, axis: None = None, - dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def sum( - a: _ArrayLike[_ScalarT], - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... -@overload -def sum( +) -> np.bool: ... +@overload # axis: int +def all[ShapeT: _Shape]( a: ArrayLike, - axis: None, - dtype: _DTypeLike[_ScalarT], + axis: int, out: None = None, keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def sum( +) -> NDArray[np.bool]: ... +@overload # axis: (int, ...) 
+def all[ShapeT: _Shape]( a: ArrayLike, - axis: None = None, - *, - dtype: _DTypeLike[_ScalarT], + axis: tuple[int, ...], out: None = None, keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def sum( - a: ArrayLike, - axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], +) -> NDArray[np.bool] | Any: ... +@overload # Nd, keepdims: True +def all[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... -@overload -def sum( - a: ArrayLike, - axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... -@overload -def sum( +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... +@overload # ?d, keepdims: True +def all[ShapeT: _Shape]( a: ArrayLike, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., + *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... -@overload -def sum( - a: ArrayLike, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: _ArrayT, +) -> NDArray[np.bool]: ... +@overload # out: (keyword) +def all[ArrayT: np.ndarray]( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... 
-@overload -def sum( - a: ArrayLike, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, - *, - out: _ArrayT, +) -> ArrayT: ... +@overload # out: (positional) +def all[ArrayT: np.ndarray]( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: ArrayT, keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... -# keep in sync with `any` +# keep in sync with `all` @overload -def all( +def any( a: ArrayLike | None, axis: None = None, out: None = None, - keepdims: Literal[False, 0] | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... -@overload -def all( - a: ArrayLike | None, - axis: int | tuple[int, ...] | None = None, +@overload # axis: int +def any[ShapeT: _Shape]( + a: ArrayLike, + axis: int, out: None = None, - keepdims: _BoolLike_co | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Incomplete: ... -@overload -def all( - a: ArrayLike | None, - axis: int | tuple[int, ...] | None, - out: _ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # axis: (int, ...) +def any[ShapeT: _Shape]( + a: ArrayLike, + axis: tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload -def all( - a: ArrayLike | None, +) -> NDArray[np.bool] | Any: ... +@overload # Nd, keepdims: True +def any[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: int | tuple[int, ...] | None = None, - *, - out: _ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... 
- -# keep in sync with `all` -@overload -def any( - a: ArrayLike | None, - axis: None = None, out: None = None, - keepdims: Literal[False, 0] | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> np.bool: ... -@overload -def any( - a: ArrayLike | None, +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... +@overload # ?d, keepdims: True +def any[ShapeT: _Shape]( + a: ArrayLike, axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: _BoolLike_co | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Incomplete: ... -@overload -def any( +) -> NDArray[np.bool]: ... +@overload # out: (keyword) +def any[ArrayT: np.ndarray]( a: ArrayLike | None, - axis: int | tuple[int, ...] | None, - out: _ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., + axis: int | tuple[int, ...] | None = None, *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload -def any( +) -> ArrayT: ... +@overload # out: (positional) +def any[ArrayT: np.ndarray]( a: ArrayLike | None, - axis: int | tuple[int, ...] | None = None, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., *, - out: _ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... -# +# keep in sync with `cumprod` below +@overload +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, +) -> _Array1D[ScalarT]: ... +@overload +def cumsum[ArrayT: np.ndarray]( + a: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, +) -> ArrayT: ... +@overload +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex, + dtype: None = None, + out: None = None, +) -> NDArray[ScalarT]: ... 
@overload def cumsum( - a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = None, + a: ArrayLike, + axis: None = None, dtype: None = None, out: None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[Any]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, - axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], + axis: None, + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, - axis: SupportsIndex | None = None, + axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> _Array1D[ScalarT]: ... +@overload +def cumsum[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = None, + axis: None = None, dtype: DTypeLike | None = None, out: None = None, -) -> NDArray[Any]: ... +) -> _Array1D[Any]: ... @overload def cumsum( + a: ArrayLike, + axis: SupportsIndex, + dtype: DTypeLike | None = None, + out: None = None, +) -> NDArray[Any]: ... +@overload +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
+# keep in sync with `cumulative_prod` below +@overload +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + /, + *, + axis: None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> _Array1D[ScalarT]: ... +@overload +def cumulative_sum[ArrayT: np.ndarray]( + x: ArrayT, + /, + *, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> ArrayT: ... +@overload +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + /, + *, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( - x: _ArrayLike[_ScalarT], + x: ArrayLike, /, *, - axis: SupportsIndex | None = None, + axis: None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[Any]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, ) -> NDArray[Any]: ... @overload +def cumulative_sum[ScalarT: np.generic]( + x: ArrayLike, + /, + *, + axis: None = None, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> _Array1D[ScalarT]: ... +@overload +def cumulative_sum[ScalarT: np.generic]( + x: ArrayLike, + /, + *, + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... +@overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = None, - dtype: _DTypeLike[_ScalarT], + axis: None = None, + dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[Any]: ... 
@overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ArrayT: np.ndarray]( x: ArrayLike, /, *, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, - out: _ArrayT, + out: ArrayT, include_initial: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... -@overload +# +@overload # ~builtins.int def ptp( - a: _ArrayLike[_ScalarT], + a: _NestedSequence[list[int]] | list[int], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... -@overload +) -> np.int_: ... +@overload # ~builtins.int, axis: def ptp( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.int, keepdims=True +def ptp( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...] | None = None, out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[np.int_]: ... +@overload # ~builtins.float +def ptp( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> np.float64: ... +@overload # ~builtins.float, axis: +def ptp( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def ptp( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[np.float64]: ... 
+@overload # ~builtins.complex +def ptp( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def ptp( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def ptp( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[np.complex128]: ... +@overload # ~number | timedelta64 +def ptp[ScalarT: np.number | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> ScalarT: ... +@overload # ~number | timedelta64 | object_, axis: +def ptp[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~number | timedelta64 | datetime64 | object_, keepdims=True +def ptp[ArrayT: NDArray[np.number | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> ArrayT: ... +@overload # datetime64 +def ptp( + a: _ArrayLike[np.datetime64], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> np.timedelta64[Any]: ... +@overload # datetime64, axis: +def ptp( + a: _ArrayLike[np.datetime64], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.timedelta64]: ... +@overload # datetime64, keepdims=True +def ptp[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.datetime64]], + axis: int | tuple[int, ...] 
| None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> np.ndarray[ShapeT, np.dtype[np.timedelta64]]: ... +@overload # object_ +def ptp( + a: _ArrayLike[np.object_], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> Any: ... +@overload # out: ArrayT +def ptp[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload # fallback +def ptp( + a: _ArrayLikeNumeric_co, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., ) -> Any: ... -@overload +@overload # fallback, axis: def ptp( - a: ArrayLike, - axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... -@overload + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True def ptp( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + out: None = None, *, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... + keepdims: Literal[True], +) -> NDArray[Any]: ... -@overload +# keep in sync with `amin` below +@overload # sequence of just `Any` (workaround) def amax( - a: _ArrayLike[_ScalarT], + a: _NestedSequence[Never], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload +) -> Any: ... 
+@overload # builtins.bool def amax( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _NestedSequence[bool], + axis: None = None, out: None = None, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... -@overload +) -> np.bool: ... +@overload # builtins.bool, axis: def amax( - a: ArrayLike, - axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + a: _NestedSequence[bool], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload +) -> NDArray[np.bool]: ... +@overload # builtins.bool, keepdims=True def amax( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _NestedSequence[bool], + axis: int | tuple[int, ...] | None = None, + out: None = None, *, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... - -@overload -def amin( - a: _ArrayLike[_ScalarT], +) -> NDArray[np.bool]: ... +@overload # ~builtins.int +def amax( + a: _NestedSequence[list[int]] | list[int], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def amin( - a: ArrayLike, - axis: _ShapeLike | None = None, +) -> np.int_: ... +@overload # ~builtins.int, axis: +def amax( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...], out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... 
-@overload -def amin( - a: ArrayLike, - axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload -def amin( - a: ArrayLike, - axis: _ShapeLike | None = None, +) -> NDArray[np.int_]: ... +@overload # ~builtins.int, keepdims=True +def amax( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...] | None = None, + out: None = None, *, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... - -# TODO: `np.prod()``: For object arrays `initial` does not necessarily -# have to be a numerical scalar. -# The only requirement is that it is compatible -# with the `.__mul__()` method(s) of the passed array's elements. -# Note that the same situation holds for all wrappers around -# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 -@overload -def prod( - a: _ArrayLikeBool_co, +) -> NDArray[np.int_]: ... +@overload # ~builtins.float +def amax( + a: _NestedSequence[list[float]] | list[float], axis: None = None, - dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> int_: ... -@overload -def prod( - a: _ArrayLikeUInt_co, - axis: None = None, - dtype: None = None, +) -> np.float64: ... +@overload # ~builtins.float, axis: +def amax( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> uint64: ... 
-@overload -def prod( - a: _ArrayLikeInt_co, - axis: None = None, - dtype: None = None, +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def amax( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: Literal[False] | _NoValueType = ..., + *, + keepdims: Literal[True], initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> int64: ... -@overload -def prod( - a: _ArrayLikeFloat_co, +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def amax( + a: _NestedSequence[list[complex]] | list[complex], axis: None = None, - dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> floating: ... -@overload -def prod( - a: _ArrayLikeComplex_co, +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def amax( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def amax( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # +number | timedelta64 | datetime64 +def amax[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], axis: None = None, - dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> complexfloating: ... 
-@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: None = None, +) -> ScalarT: ... +@overload # +number | timedelta64 | datetime64 | object_, axis: +def amax[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # +number | timedelta64 | datetime64 | object_, keepdims=True +def amax[ArrayT: NDArray[np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # object_ +def amax( + a: _ArrayLike[np.object_], + axis: None = None, out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # out: ArrayT +def amax[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # fallback +def amax( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
-@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[_ScalarT], +@overload # fallback, axis: +def amax( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def amax( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... + +max = amax + +# keep in sync with `amax` above +@overload # sequence of just `Any` (workaround) +def amin( + a: _NestedSequence[Never], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # builtins.bool +def amin( + a: _NestedSequence[bool], axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... +@overload # builtins.bool, axis: +def amin( + a: _NestedSequence[bool], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # builtins.bool, keepdims=True +def amin( + a: _NestedSequence[bool], + axis: int | tuple[int, ...] 
| None = None, + out: None = None, *, - dtype: _DTypeLike[_ScalarT], + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # ~builtins.int +def amin( + a: _NestedSequence[list[int]] | list[int], + axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, +) -> np.int_: ... +@overload # ~builtins.int, axis: +def amin( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...], out: None = None, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.int, keepdims=True +def amin( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.float +def amin( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... +@overload # ~builtins.float, axis: +def amin( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... 
+@overload # ~builtins.float, keepdims=True +def amin( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def amin( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def amin( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def amin( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # +number | timedelta64 | datetime64 +def amin[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... 
+@overload # +number | timedelta64 | datetime64 | object_, axis: +def amin[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # +number | timedelta64 | datetime64 | object_, keepdims=True +def amin[ArrayT: NDArray[np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # object_ +def amin( + a: _ArrayLike[np.object_], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... -@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: _ArrayT, +@overload # out: ArrayT +def amin[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, +) -> ArrayT: ... +@overload # fallback +def amin( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... 
+@overload # fallback, axis: +def amin( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def amin( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + out: None = None, *, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> NDArray[Any]: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +min = amin + +# keep in sync with `cumsum` above @overload -def cumprod( - a: _ArrayLikeBool_co, - axis: SupportsIndex | None = None, +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLike[ScalarT], + axis: None = None, dtype: None = None, out: None = None, -) -> NDArray[int_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumprod( - a: _ArrayLikeUInt_co, - axis: SupportsIndex | None = None, +def cumprod[ArrayT: NDArray[np.number | np.bool | np.object_]]( + a: ArrayT, + axis: SupportsIndex, dtype: None = None, out: None = None, -) -> NDArray[uint64]: ... +) -> ArrayT: ... @overload -def cumprod( - a: _ArrayLikeInt_co, - axis: SupportsIndex | None = None, +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex, dtype: None = None, out: None = None, -) -> NDArray[int64]: ... +) -> NDArray[ScalarT]: ... @overload def cumprod( - a: _ArrayLikeFloat_co, - axis: SupportsIndex | None = None, + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, dtype: None = None, out: None = None, -) -> NDArray[floating]: ... +) -> _Array1D[Any]: ... 
@overload def cumprod( - a: _ArrayLikeComplex_co, - axis: SupportsIndex | None = None, + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex, dtype: None = None, out: None = None, -) -> NDArray[complexfloating]: ... +) -> NDArray[Any]: ... @overload -def cumprod( - a: _ArrayLikeObject_co, - axis: SupportsIndex | None = None, - dtype: None = None, +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[object_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumprod( +def cumprod[ScalarT: np.number | np.bool | np.object_]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> _Array1D[ScalarT]: ... +@overload +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = None, - *, - dtype: _DTypeLike[_ScalarT], + axis: None = None, + dtype: DTypeLike | None = None, out: None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[Any]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
@overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +# keep in sync with `cumulative_sum` above @overload -def cumulative_prod( - x: _ArrayLikeBool_co, +def cumulative_prod[ScalarT: np.number | np.bool | np.object_]( + x: _ArrayLike[ScalarT], /, *, - axis: SupportsIndex | None = None, + axis: None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[int_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumulative_prod( - x: _ArrayLikeUInt_co, +def cumulative_prod[ArrayT: NDArray[np.number | np.bool | np.object_]]( + x: ArrayT, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[uint64]: ... +) -> ArrayT: ... @overload -def cumulative_prod( - x: _ArrayLikeInt_co, +def cumulative_prod[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[int64]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_prod( - x: _ArrayLikeFloat_co, + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[floating]: ... +) -> _Array1D[Any]: ... @overload def cumulative_prod( - x: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[complexfloating]: ... +) -> NDArray[Any]: ... 
@overload -def cumulative_prod( - x: _ArrayLikeObject_co, +def cumulative_prod[ScalarT: np.number | np.bool | np.object_]( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, - dtype: None = None, + axis: None = None, + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[object_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumulative_prod( +def cumulative_prod[ScalarT: np.number | np.bool | np.object_]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, - dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: None = None, dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, -) -> NDArray[Any]: ... +) -> _Array1D[Any]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: DTypeLike | None = None, - out: _ArrayT, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_prod[ArrayT: np.ndarray]( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: ArrayT, include_initial: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... def ndim(a: ArrayLike) -> int: ... @@ -1391,11 +1948,11 @@ def around( out: None = None, ) -> float16: ... @overload -def around( - a: _NumberOrObjectT, +def around[ScalarOrArrayT: np.number | np.object_ | NDArray[np.number | np.object_]]( + a: ScalarOrArrayT, decimals: SupportsIndex = 0, out: None = None, -) -> _NumberOrObjectT: ... +) -> ScalarOrArrayT: ... 
@overload def around( a: _ComplexLike_co | object_, @@ -1409,11 +1966,11 @@ def around( out: None = None, ) -> NDArray[float16]: ... @overload -def around( - a: _ArrayLike[_NumberOrObjectT], +def around[NumberOrObjectT: np.number | np.object_]( + a: _ArrayLike[NumberOrObjectT], decimals: SupportsIndex = 0, out: None = None, -) -> NDArray[_NumberOrObjectT]: ... +) -> NDArray[NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1421,136 +1978,840 @@ def around( out: None = None, ) -> NDArray[Any]: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = 0, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... + +# keep in sync with `sum` below (but without `timedelta64`) +@overload # ~builtins.float +def prod( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... +@overload # ~builtins.float, axis: +def prod( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def prod( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def prod( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def prod( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def prod( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~number +def prod[ScalarT: np.number]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # ~number | timedelta64 | object_, axis: +def prod[ScalarT: np.number | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... 
+@overload # ~number | object_, keepdims=True +def prod[ArrayT: NDArray[np.number | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # bool_ | +builtins.int +def prod( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.int_: ... +@overload # bool_ | +builtins.int, axis: +def prod( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # bool_, keepdims=True +def prod[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.int_]]: ... +@overload # object_ +def prod( + a: _SupportsArray[np.dtype[np.object_]], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # dtype: ScalarT +def prod[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... 
+@overload # dtype: ScalarT (keyword), keepdims=True +def prod[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def prod[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def prod[ScalarT: np.generic]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def prod[ScalarT: np.generic]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT (keyword) +def prod[ArrayT: np.ndarray]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
+@overload # out: ArrayT (positional) +def prod[ArrayT: np.ndarray]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # fallback +def prod( + a: _ArrayLikeMultiplicative_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def prod( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def prod( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... + +# keep in sync with `prod` above (but also accept `timedelta64`) +@overload # ~builtins.float +def sum( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... 
+@overload # ~builtins.float, axis: +def sum( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def sum( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def sum( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def sum( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def sum( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... 
+@overload # ~number | timedelta64 +def sum[ScalarT: np.number | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # ~number | timedelta64 | object_, axis: +def sum[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~number | timedelta64 | object_, keepdims=True +def sum[ArrayT: NDArray[np.number | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # bool_ | +builtins.int +def sum( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.int_: ... +@overload # bool_ | +builtins.int, axis: +def sum( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # bool_, keepdims=True +def sum[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.bool]], + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.int_]]: ... +@overload # object_ +def sum( + a: _SupportsArray[np.dtype[np.object_]], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # dtype: ScalarT +def sum[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def sum[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def sum[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def sum[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] 
| None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def sum[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT (keyword) +def sum[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # out: ArrayT (positional) +def sum[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # fallback +def sum( + a: _ArrayLikeNumeric_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def sum( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def sum( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... + +# +@overload # +integer | ~object_ | +builtins.float +def mean( + a: _DualArrayLike[np.dtype[np.integer | np.bool | np.object_], float], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... +@overload # +integer | +builtins.float, axis: +def mean( + a: _DualArrayLike[np.dtype[np.integer | np.bool], float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # +integer, keepdims=True +def mean[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.integer | np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # ~complex (`list` ensures invariance to avoid overlap with the previous overload) +def mean( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~inexact | timedelta64 +def mean[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... 
+@overload # ~inexact | timedelta64 | object_, axis: +def mean[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~inexact | timedelta64 | object_, keepdims=True +def mean[ArrayT: NDArray[np.inexact | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # dtype: ScalarT +def mean[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def mean[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def mean[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def mean[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] 
| None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def mean[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT +def mean[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # fallback +def mean( + a: _ArrayLikeNumeric_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def mean( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def mean( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... 
-# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 -@overload -def mean( - a: _ArrayLikeFloat_co, +# keep in sync with `mean` above +@overload # +integer | ~object_ | +builtins.float +def std( + a: _DualArrayLike[np.dtype[np.integer | np.bool | np.object_], float], axis: None = None, dtype: None = None, out: None = None, + ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> floating: ... -@overload -def mean( - a: _ArrayLikeComplex_co, - axis: None = None, + mean: _ArrayLikeFloat_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.float64: ... +@overload # +integer | +builtins.float, axis: +def std( + a: _DualArrayLike[np.dtype[np.integer | np.bool], float], + axis: int | tuple[int, ...], dtype: None = None, out: None = None, + ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> complexfloating: ... -@overload -def mean( - a: _ArrayLike[np.timedelta64], + mean: _ArrayLikeFloat_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # +integer, keepdims=True +def std[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.integer | np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # ~complex (`list` ensures invariance to avoid overlap with the previous overload) +def std( + a: _NestedSequence[list[complex]] | list[complex], axis: None = None, dtype: None = None, out: None = None, + ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> timedelta64: ... 
-@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~inexact | timedelta64 +def std[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... +@overload # ~inexact | timedelta64 | object_, axis: +def std[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... -@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[_ScalarT], + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~inexact | timedelta64 | object_, keepdims=True +def std[ArrayT: NDArray[np.inexact | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, out: None = None, - keepdims: Literal[False] | _NoValueType = ..., *, + ddof: float = 0, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... 
-@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... +@overload # dtype: ScalarT +def std[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, + ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], - out: None, - keepdims: Literal[True, 1], + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def std[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> NDArray[_ScalarT]: ... -@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def std[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], out: None = None, + ddof: float = 0, *, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... 
-@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def std[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, - keepdims: bool | _NoValueType = ..., + ddof: float = 0, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... -@overload -def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def std[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], out: None = None, - keepdims: bool | _NoValueType = ..., + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Incomplete: ... - -@overload + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT +def std[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... 
+@overload # fallback def std( - a: _ArrayLikeComplex_co, + a: _ArrayLikeNumeric_co, axis: None = None, - dtype: None = None, + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., @@ -1558,89 +2819,90 @@ def std( where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> floating: ... -@overload +) -> Any: ... +@overload # fallback, axis: def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... -@overload +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[_ScalarT], + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: Literal[False] | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> NDArray[Any]: ... 
+ +# keep in sync with `std` above +@overload # +integer | ~object_ | +builtins.float +def var( + a: _DualArrayLike[np.dtype[np.integer | np.bool | np.object_], float], axis: None = None, - *, - dtype: _DTypeLike[_ScalarT], + dtype: None = None, out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeFloat_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, +) -> np.float64: ... +@overload # +integer | +builtins.float, axis: +def var( + a: _DualArrayLike[np.dtype[np.integer | np.bool], float], + axis: int | tuple[int, ...], + dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeFloat_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... -@overload -def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: _ArrayT, +) -> NDArray[np.float64]: ... +@overload # +integer, keepdims=True +def var[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.integer | np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... 
-@overload -def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, - *, - out: _ArrayT, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # ~complex (`list` ensures invariance to avoid overlap with the previous overload) +def var( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + dtype: None = None, + out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... - -@overload -def var( - a: _ArrayLikeComplex_co, +) -> np.complex128: ... +@overload # ~inexact | timedelta64 +def var[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], axis: None = None, dtype: None = None, out: None = None, @@ -1650,86 +2912,149 @@ def var( where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> floating: ... -@overload -def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, +) -> ScalarT: ... +@overload # ~inexact | timedelta64 | object_, axis: +def var[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... 
-@overload -def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[_ScalarT], +) -> NDArray[ScalarT]: ... +@overload # ~inexact | timedelta64 | object_, keepdims=True +def var[ArrayT: NDArray[np.inexact | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + ddof: float = 0, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... +@overload # dtype: ScalarT +def var[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def var[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... -@overload -def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = None, +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def var[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] 
| None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def var[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def var[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... -@overload +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT +def var[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... 
+@overload # fallback def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... -@overload +@overload # fallback, axis: def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: _ArrayT, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... -@overload +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, dtype: DTypeLike | None = None, - *, - out: _ArrayT, + out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> NDArray[Any]: ... 
-max = amax -min = amin round = around diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 7fcc6b4f770a..b01ba108d2c4 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -38,7 +38,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, .. versionchanged:: 1.20.0 Values are rounded towards ``-inf`` instead of ``0`` when an integer ``dtype`` is specified. The old behavior can - still be obtained with ``np.linspace(start, stop, num).astype(int)`` + still be obtained with ``np.linspace(start, stop, num).astype(np.int_)`` Parameters ---------- @@ -375,9 +375,9 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Note that the above may not produce exact integers: - >>> np.geomspace(1, 256, num=9, dtype=int) + >>> np.geomspace(1, 256, num=9, dtype=np.int_) array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + >>> np.around(np.geomspace(1, 256, num=9)).astype(np.int_) array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) Negative, decreasing, and complex inputs are allowed: diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 19e1238c4e15..982a169bcbd5 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,5 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload +from typing import Any, Literal as L, SupportsIndex, overload import numpy as np from numpy._typing import ( @@ -7,16 +7,67 @@ from numpy._typing import ( NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ComplexLike_co, _DTypeLike, ) from numpy._typing._array_like import _DualArrayLike __all__ = ["geomspace", "linspace", "logspace"] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _ToFloat64 = float | np.integer | np.bool # `np.float64` is assignable to `float` +type 
_ToArrayFloat64 = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] -_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] +### +@overload +def linspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[np.float64]: ... +@overload +def linspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... @overload def linspace( start: _ToArrayFloat64, @@ -40,7 +91,7 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[np.floating]: ... +) -> NDArray[np.float64 | Any]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -52,31 +103,31 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[np.complexfloating]: ... +) -> NDArray[np.complex128 | Any]: ... 
@overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, retstep: L[False], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -90,6 +141,42 @@ def linspace( device: L["cpu"] | None = None, ) -> NDArray[Incomplete]: ... @overload +def linspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[np.float64], np.float64]: ... +@overload +def linspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[ScalarT], ScalarT]: ... +@overload def linspace( start: _ToArrayFloat64, stop: _ToArrayFloat64, @@ -112,7 +199,7 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.floating], np.floating]: ... 
+) -> tuple[NDArray[np.float64 | Any], np.float64 | Any]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -124,19 +211,19 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... +) -> tuple[NDArray[np.complex128 | Any], np.complex128 | Any]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, retstep: L[True], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +) -> tuple[NDArray[ScalarT], ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -150,6 +237,48 @@ def linspace( device: L["cpu"] | None = None, ) -> tuple[NDArray[Incomplete], Incomplete]: ... +# +@overload +def logspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.float64]: ... +@overload +def logspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + base: complex = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + base: _ComplexLike_co, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... 
@overload def logspace( start: _ToArrayFloat64, @@ -169,7 +298,7 @@ def logspace( base: _ArrayLikeFloat_co = 10.0, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.floating]: ... +) -> NDArray[np.float64 | Any]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -179,28 +308,28 @@ def logspace( base: _ArrayLikeComplex_co = 10.0, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.complexfloating]: ... +) -> NDArray[np.complex128 | Any]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, base: _ArrayLikeComplex_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, base: _ArrayLikeComplex_co = 10.0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -212,6 +341,44 @@ def logspace( axis: SupportsIndex = 0, ) -> NDArray[Incomplete]: ... +# +@overload +def geomspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.float64]: ... +@overload +def geomspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def geomspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... 
+@overload +def geomspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... @overload def geomspace( start: _ToArrayFloat64, @@ -229,7 +396,7 @@ def geomspace( endpoint: bool = True, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.floating]: ... +) -> NDArray[np.float64 | Any]: ... @overload def geomspace( start: _ArrayLikeComplex_co, @@ -238,26 +405,26 @@ def geomspace( endpoint: bool = True, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.complexfloating]: ... +) -> NDArray[np.complex128 | Any]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, @@ -268,6 +435,7 @@ def geomspace( axis: SupportsIndex = 0, ) -> NDArray[Incomplete]: ... 
+# def add_newdoc( place: str, obj: str, diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 3c03d81165fb..8dfff5f375b1 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -5,7 +5,6 @@ import math import types -import warnings from functools import cached_property from numpy._utils import set_module @@ -17,16 +16,14 @@ def _fr0(a): """fix rank-0 --> rank-1""" if a.ndim == 0: - a = a.copy() - a.shape = (1,) + a = a.reshape((1,)) return a def _fr1(a): """fix rank > 0 --> rank-0""" if a.size == 1: - a = a.copy() - a.shape = () + a = a.reshape(()) return a @@ -171,11 +168,14 @@ class finfo: """ - _finfo_cache = {} + _finfo_cache = {} # noqa: RUF012 __class_getitem__ = classmethod(types.GenericAlias) def __new__(cls, dtype): + if dtype is None: + raise TypeError("dtype must not be None") + try: obj = cls._finfo_cache.get(dtype) # most common path if obj is not None: @@ -183,15 +183,6 @@ def __new__(cls, dtype): except TypeError: pass - if dtype is None: - # Deprecated in NumPy 1.25, 2023-01-16 - warnings.warn( - "finfo() dtype cannot be None. This behavior will " - "raise an error in the future. 
(Deprecated in NumPy 1.25)", - DeprecationWarning, - stacklevel=2 - ) - try: dtype = numeric.dtype(dtype) except TypeError: @@ -404,8 +395,8 @@ class iinfo: """ - _min_vals = {} - _max_vals = {} + _min_vals = {} # noqa: RUF012 + _max_vals = {} # noqa: RUF012 __class_getitem__ = classmethod(types.GenericAlias) @@ -416,7 +407,7 @@ def __init__(self, int_type): self.dtype = numeric.dtype(type(int_type)) self.kind = self.dtype.kind self.bits = self.dtype.itemsize * 8 - self.key = "%s%d" % (self.kind, self.bits) + self.key = f"{self.kind}{self.bits}" if self.kind not in 'iu': raise ValueError(f"Invalid integer data type {self.kind!r}.") @@ -458,5 +449,5 @@ def __str__(self): return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} def __repr__(self): - return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, - self.min, self.max, self.dtype) + name = self.__class__.__name__ + return f'{name}(min={self.min}, max={self.max}, dtype={self.dtype})' diff --git a/numpy/_core/include/numpy/arrayscalars.h b/numpy/_core/include/numpy/arrayscalars.h index ff048061f70a..46bc58cc2a35 100644 --- a/numpy/_core/include/numpy/arrayscalars.h +++ b/numpy/_core/include/numpy/arrayscalars.h @@ -173,9 +173,11 @@ typedef struct { #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) #define PyArrayScalar_FromLong(i) \ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \ + PyObject *obj = PyArrayScalar_FromLong(i); \ + Py_INCREF(obj); \ + return obj; \ +} while (0) #define PyArrayScalar_RETURN_FALSE \ return Py_INCREF(PyArrayScalar_False), \ PyArrayScalar_False diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 5ac964782ec0..15b4572c2802 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ 
b/numpy/_core/include/numpy/dtype_api.h @@ -80,6 +80,7 @@ typedef enum { * assume that if set, it also applies to normal operations though! */ NPY_METH_IS_REORDERABLE = 1 << 3, + _NPY_METH_IS_CAST = 1 << 4, /* automatically set for casts */ /* * Private flag for now for *logic* functions. The logical functions * `logical_or` and `logical_and` can always cast the inputs to booleans @@ -114,18 +115,18 @@ typedef struct PyArrayMethod_Context_tag { PyArray_Descr *const *descriptors; #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION void * _reserved; - /* + /* * Optional flag to pass information into the inner loop * NPY_ARRAYMETHOD_CONTEXT_FLAGS */ uint64_t flags; - + /* * Optional run-time parameters to pass to the loop (currently used in sorting). * Fixed parameters are expected to be passed via auxdata. */ void *parameters; - + /* Structure may grow (this is harmless for DType authors) */ #endif } PyArrayMethod_Context; @@ -495,7 +496,7 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); /* - * Constants that can be queried and used e.g. by reduce identies defaults. + * Constants that can be queried and used e.g. by reducing identities defaults. * These are also used to expose .finfo and .iinfo for example. 
*/ /* Numerical constants */ diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index f06bafe5b52a..9cc1a4c1d000 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -32,7 +32,7 @@ extern "C" { #define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) +#define PyArray_CheckExact(op) (Py_TYPE((PyObject*)(op)) == &PyArray_Type) #define PyArray_HasArrayInterfaceType(op, type, context, out) \ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ @@ -220,15 +220,6 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value) if (key == title) { return 1; } -#ifdef PYPY_VERSION - /* - * On PyPy, dictionary keys do not always preserve object identity. - * Fall back to comparison by value. - */ - if (PyUnicode_Check(title) && PyUnicode_Check(key)) { - return PyUnicode_Compare(title, key) == 0 ? 
1 : 0; - } -#endif return 0; } @@ -248,12 +239,12 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value) static inline npy_intp PyArray_ITEMSIZE(const PyArrayObject *arr) { - return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr); + return PyDataType_ELSIZE(PyArray_DESCR(arr)); } #define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL) #define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ +#define PyDataType_ISUNSIZED(dtype) (PyDataType_ELSIZE((PyArray_Descr*)(dtype)) == 0 && \ !PyDataType_HASFIELDS(dtype)) #define PyDataType_FLAGCHK(dtype, flag) \ @@ -279,8 +270,7 @@ PyArray_ITEMSIZE(const PyArrayObject *arr) static inline PyObject * PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem( - (void *)itemptr, (PyArrayObject *)arr); + return PyDataType_GetArrFuncs(PyArray_DESCR(arr))->getitem((void *)itemptr, (PyArrayObject *)arr); } /* @@ -291,7 +281,7 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) static inline int PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr); + return PyDataType_GetArrFuncs(PyArray_DESCR(arr))->setitem(v, itemptr, arr); } #endif /* not internal */ diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index f740788f3720..7aa23dd3426b 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -127,7 +127,6 @@ enum NPY_TYPECHAR { NPY_CLONGDOUBLELTR = 'G', NPY_OBJECTLTR = 'O', NPY_STRINGLTR = 'S', - NPY_DEPRECATED_STRINGLTR2 = 'a', NPY_UNICODELTR = 'U', NPY_VOIDLTR = 'V', NPY_DATETIMELTR = 'M', @@ -174,7 +173,7 @@ enum NPY_TYPECHAR { * should be 
downstream compatible, but the actual algorithms used may be * different than before. The new approach should be more flexible and easier * to update. - * + * * Names with a leading underscore are private, and should only be used * internally by NumPy. * @@ -188,7 +187,7 @@ typedef enum { NPY_HEAPSORT = 1, NPY_MERGESORT = 2, NPY_STABLESORT = 2, - // new style names + // new style names _NPY_SORT_HEAPSORT = 1, NPY_SORT_DEFAULT = 0, NPY_SORT_STABLE = 2, @@ -736,6 +735,8 @@ typedef struct _arr_descr { PyObject *shape; /* a tuple */ } PyArray_ArrayDescr; +#define PyDataType_TYPENUM(descr) (((PyArray_Descr *)(descr))->type_num) + /* * Memory handler structure for array data. */ @@ -1598,7 +1599,7 @@ PyArray_FLAGS(const PyArrayObject *arr) static inline int PyArray_TYPE(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->descr->type_num; + return PyDataType_TYPENUM(PyArray_DESCR(arr)); } static inline int @@ -1610,7 +1611,7 @@ PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) static inline PyArray_Descr * PyArray_DTYPE(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->descr; + return PyArray_DESCR(arr); } static inline npy_intp * @@ -1668,8 +1669,9 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) ((type) <= NPY_LONGDOUBLE)) || \ ((type) == NPY_HALF)) -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) +#define PyTypeNum_ISNUMBER(type) (((type) >= 0) && \ + (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF))) #define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ ((type) == NPY_UNICODE)) @@ -1693,21 +1695,21 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) -#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0)) -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUNSIGNED(obj) 
PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) +#define PyDataType_ISLEGACY(dtype) (PyDataType_TYPENUM(dtype) < NPY_VSTRING && (PyDataType_TYPENUM(dtype) >= 0)) +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(PyDataType_TYPENUM(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyDataType_TYPENUM(obj)) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyDataType_TYPENUM(obj)) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyDataType_TYPENUM(obj)) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyDataType_TYPENUM(obj)) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyDataType_TYPENUM(obj)) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(PyDataType_TYPENUM(obj)) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyDataType_TYPENUM(obj)) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyDataType_TYPENUM(obj)) +#define PyDataType_ISDATETIME(obj) 
PyTypeNum_ISDATETIME(PyDataType_TYPENUM(obj)) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyDataType_TYPENUM(obj)) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyDataType_TYPENUM(obj)) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyDataType_TYPENUM(obj)) +#define PyDataType_MAKEUNSIZED(dtype) PyDataType_SET_ELSIZE(dtype, 0) /* * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific @@ -1756,7 +1758,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) #define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyDataType_BYTEORDER(PyArray_DESCR(m))) #define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) #define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ @@ -1770,7 +1772,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(PyDataType_BYTEORDER(d)) #define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) /************************************************************ @@ -1958,7 +1960,7 @@ typedef struct { * #endif * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ - * + * * #ifndef NPY_NO_DEPRECATED_API * #if defined(_WIN32) * #define _WARN___STR2__(x) #x diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index e39e65aedea7..9b14bc2de86a 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -213,6 +213,11 @@ DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1) DESCR_ACCESSOR(NAMES, 
names, PyObject *, 1) DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1) DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) +/* ABI compatible in 1.x and 2.x, but defined together with others */ +DESCR_ACCESSOR(TYPE, type, char, 0) +DESCR_ACCESSOR(KIND, kind, char, 0) +DESCR_ACCESSOR(BYTEORDER, byteorder, char, 0) +DESCR_ACCESSOR(TYPEOBJ, typeobj, PyTypeObject *, 0) #undef DESCR_ACCESSOR diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index c129a3aceb6d..4bfe3ab09dea 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -7,12 +7,6 @@ * On Mac OS X, because there is only one configuration stage for all the archs * in universal builds, any macro which depends on the arch needs to be * hardcoded. - * - * Note that distutils/pip will attempt a universal2 build when Python itself - * is built as universal2, hence this hardcoding is needed even if we do not - * support universal2 wheels anymore (see gh-22796). - * This code block can be removed after we have dropped the setup.py based - * build completely. */ #ifdef __APPLE__ #undef NPY_SIZEOF_LONG @@ -85,6 +79,7 @@ #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 #define NPY_2_4_API_VERSION 0x00000015 +#define NPY_2_5_API_VERSION 0x00000015 /* @@ -107,10 +102,11 @@ * default, or narrow it down if they wish to use newer API. If you adjust * this, consider the Python version support (example for 1.25.x): * - * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) - * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 - * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 - * NumPy 1.15.x supports Python: ... 3.6 3.7 + * NumPy 1.26.x supports Python: 3.9 3.10 3.11 3.12 + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 
3.6 3.7 * * Users of the stable ABI may wish to target the last Python that is not * end of life. This would be 3.8 at NumPy 1.25 release time. @@ -124,8 +120,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.11 support) */ - #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION + /* Use the default (increase when dropping Python 3.12 support) */ + #define NPY_FEATURE_VERSION NPY_1_25_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -177,6 +173,8 @@ #define NPY_FEATURE_VERSION_STRING "2.3" #elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.4" +#elif NPY_FEATURE_VERSION == NPY_2_5_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.5" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index f5f82b57c91f..3d0d2deac6bd 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -122,8 +122,8 @@ typedef struct _tagPyUFuncObject { /* The number of elements in 'functions' and 'data' */ int ntypes; - /* Used to be unused field 'check_return' */ - int reserved1; + /* Flags for the ufunc (e.g. 
UFUNC_NO_FLOATINGPOINT_ERRORS) */ + int _ufunc_flags; /* The name of the ufunc */ const char *name; @@ -223,7 +223,7 @@ typedef struct _tagPyUFuncObject { #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION /* New private fields related to dispatching */ void *_dispatch_cache; - /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */ + /* Ordered dict `tuple of DTypes -> (tuple of DTypes, ArrayMethod/Promoter)` */ PyObject *_loops; #endif #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION @@ -232,7 +232,9 @@ typedef struct _tagPyUFuncObject { */ PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; #endif -} PyUFuncObject; +} PyUFuncObject_fields; + +typedef PyUFuncObject_fields PyUFuncObject; #include "arrayobject.h" /* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ @@ -247,6 +249,15 @@ typedef struct _tagPyUFuncObject { #define UFUNC_OBJ_ISOBJECT 1 #define UFUNC_OBJ_NEEDS_API 2 +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD +/* + * Flag stored in PyUFuncObject._ufunc_flags to indicate that non-object loops + * of this ufunc never raise floating point errors. Used to skip the + * expensive npy_clear_floatstatus/npy_get_floatstatus calls. + */ +#define UFUNC_NO_FLOATINGPOINT_ERRORS 0x1 +#endif /* NPY_INTERNAL_BUILD */ + #if NPY_ALLOW_THREADS #define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); @@ -336,6 +347,12 @@ typedef struct _loop1d_info { #include "__ufunc_api.h" +// In future, when adding support for opaque PyObject, this would become +// a ABI function call to get the ufunc struct fields from the PyObject. 
+static inline PyUFuncObject_fields *_PyUFuncObject_GET_ITEM_DATA(PyUFuncObject *ufunc) { + return (PyUFuncObject_fields *)ufunc; +} + #ifdef __cplusplus } #endif diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 8cfa7f94a8da..89a36808e6f1 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -130,8 +130,7 @@ class memmap(ndarray): Examples -------- >>> import numpy as np - >>> data = np.arange(12, dtype='float32') - >>> data.resize((3,4)) + >>> data = np.arange(12, dtype=np.float32).reshape((3, 4)) This example uses a temporary file so that doctest doesn't write files to your directory. You would use a 'normal' filename. @@ -142,7 +141,7 @@ class memmap(ndarray): Create a memmap with dtype and shape that matches our data: - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp = np.memmap(filename, dtype=np.float32, mode='w+', shape=(3,4)) >>> fp memmap([[0., 0., 0., 0.], [0., 0., 0., 0.], @@ -165,7 +164,7 @@ class memmap(ndarray): Load the memmap and verify data was stored: - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> newfp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], @@ -173,13 +172,13 @@ class memmap(ndarray): Read-only memmap: - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> fpr.flags.writeable False Copy-on-write memmap: - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc = np.memmap(filename, dtype=np.float32, mode='c', shape=(3,4)) >>> fpc.flags.writeable True @@ -205,7 +204,7 @@ class memmap(ndarray): Offset into a memmap: - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo = np.memmap(filename, dtype=np.float32, mode='r', offset=16) >>> fpo memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) @@ -213,7 +212,7 @@ class memmap(ndarray): 
__array_priority__ = -100.0 - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + def __new__(cls, filename, dtype=uint8, mode='r+', offset=0, shape=None, order='C'): # Import here to minimize 'import numpy' overhead import mmap @@ -290,7 +289,7 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, array_offset = offset - start mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + self = ndarray.__new__(cls, shape, dtype=descr, buffer=mm, offset=array_offset, order=order) self._mmap = mm self.offset = offset diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index 0b31328404fb..bdb3dc721de2 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -1,3 +1,94 @@ -from numpy import memmap +from _typeshed import StrOrBytesPath, SupportsWrite +from typing import ( + Any, + ClassVar, + Final, + Literal, + Protocol, + Self, + overload, + override, + type_check_only, +) +from typing_extensions import TypeVar + +import numpy as np +from numpy import _OrderKACF, _SupportsFileMethods +from numpy._typing import DTypeLike, _AnyShape, _DTypeLike, _Shape __all__ = ["memmap"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], default=np.dtype[Any], covariant=True) + +type _Mode = Literal["r", "c", "r+", "w+"] +type _ToMode = Literal[_Mode, "readonly", "copyonwrite", "readwrite", "write"] + +@type_check_only +class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... 
+ +### + +class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] + __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + + filename: Final[str | None] + offset: Final[int] + mode: Final[_Mode] + + @overload + def __new__[ScalarT: np.generic]( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeT_co, + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> Self: ... + @overload + def __new__( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[np.uint8] = ..., + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[_AnyShape, np.dtype[np.uint8]]: ... + @overload + def __new__[ScalarT: np.generic]( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[ScalarT], + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[_AnyShape, np.dtype[ScalarT]]: ... + @overload + def __new__( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap: ... + + # + @override + def __array_finalize__(self, obj: object, /) -> None: ... + @override + def __array_wrap__( # type: ignore[override] + self, + /, + array: memmap[_ShapeT_co, _DTypeT_co], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> Any: ... + + # + def flush(self) -> None: ... 
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 3a0b52c3b079..dc3985a0f5a3 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -51,6 +51,7 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x # 0x00000015 - 2.4.x +# 0x00000015 - 2.5.x C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the @@ -604,33 +605,6 @@ npymath_lib = static_library('npymath', gnu_symbol_visibility: 'hidden', ) -dir_separator = '/' -if build_machine.system() == 'windows' - dir_separator = '\\' -endif -configure_file( - input: 'npymath.ini.in', - output: 'npymath.ini', - configuration: configuration_data({ - 'pkgname' : 'numpy._core', - 'sep' : dir_separator, - }), - install: true, - install_dir: np_dir / '_core/lib/npy-pkg-config', - install_tag: 'devel' -) -configure_file( - input: 'mlib.ini.in', - output: 'mlib.ini', - configuration: configuration_data({ - 'posix_mathlib' : mlib_linkflag, - 'msvc_mathlib' : 'm.lib', - }), - install: true, - install_dir: np_dir / '_core/lib/npy-pkg-config', - install_tag: 'devel' -) - if false # This doesn't quite work (yet), it assumes we'll install headers under # include/, and trying to add the correct path with `extra_cflags` runs into @@ -758,7 +732,7 @@ py.extension_module('_multiarray_tests', src_file.process('src/multiarray/_multiarray_tests.c.src'), 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.cpp', + 'src/common/npy_hashtable.c', 'src/common/npy_import.c', src_file.process('src/common/templ_common.h.src') ], @@ -1113,13 +1087,14 @@ src_multiarray_umath_common = [ 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.cpp', + 'src/common/npy_hashtable.c', 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ufunc_override.c', 'src/common/numpyos.c', 'src/common/npy_cpu_features.c', 'src/common/npy_cpu_dispatch.c', + 
'src/common/npy_sort.c', src_file.process('src/common/templ_common.h.src') ] if have_blas @@ -1232,6 +1207,7 @@ src_umath = umath_gen_headers + [ src_file.process('src/umath/scalarmath.c.src'), 'src/umath/ufunc_object.c', 'src/umath/umathmodule.c', + 'src/umath/real_imag_ufuncs.cpp', 'src/umath/special_integer_comparisons.cpp', 'src/umath/string_ufuncs.cpp', 'src/umath/stringdtype_ufuncs.cpp', diff --git a/numpy/_core/mlib.ini.in b/numpy/_core/mlib.ini.in deleted file mode 100644 index badaa2ae9de4..000000000000 --- a/numpy/_core/mlib.ini.in +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=@posix_mathlib@ -Cflags= - -[msvc] -Libs=@msvc_mathlib@ -Cflags= diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index dd5a66a8785f..c7f21b60c488 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -9,7 +9,7 @@ import functools from . import _multiarray_umath, overrides -from ._multiarray_umath import * # noqa: F403 +from ._multiarray_umath import * # These imports are needed for backward compatibility, # do not change them. issue gh-15518 @@ -210,6 +210,9 @@ def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): Join a sequence of arrays along an existing axis. + .. versionadded:: 2.0 + ``numpy.concat`` added as a shorthand for ``numpy.concatenate``. + Parameters ---------- a1, a2, ... 
: sequence of array_like @@ -700,7 +703,7 @@ def min_scalar_type(a, /): >>> np.min_scalar_type(1e50) dtype('float64') - >>> np.min_scalar_type(np.arange(4,dtype='f8')) + >>> np.min_scalar_type(np.arange(4, dtype=np.float64)) dtype('float64') """ @@ -732,10 +735,10 @@ def result_type(*arrays_and_dtypes): Examples -------- >>> import numpy as np - >>> np.result_type(3, np.arange(7, dtype='i1')) + >>> np.result_type(3, np.arange(7, dtype=np.int8)) dtype('int8') - >>> np.result_type('i4', 'c8') + >>> np.result_type(np.int32, np.complex64) dtype('complex128') >>> np.result_type(3.0, -2) @@ -899,7 +902,7 @@ def vdot(a, b, /): >>> 1*4 + 4*1 + 5*2 + 6*2 30 - """ # noqa: E501 + """ return (a, b) @@ -961,7 +964,7 @@ def bincount(x, /, weights=None, minlength=0): The input array needs to be of integer dtype, otherwise a TypeError is raised: - >>> np.bincount(np.arange(5, dtype=float)) + >>> np.bincount(np.arange(5, dtype=np.float64)) Traceback (most recent call last): ... TypeError: Cannot cast array data from dtype('float64') to dtype('int64') diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 0293d193cbc4..e35e2375db1c 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,25 +1,25 @@ -# TODO: Sort out any and all missing functions in this namespace import datetime as dt from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem -from collections.abc import Callable, Iterable, Sequence +from collections.abc import Buffer, Callable, Iterable, Sequence +from types import EllipsisType, TracebackType from typing import ( Any, ClassVar, Final, + Generic, Literal as L, + Never, Protocol, + Self, SupportsIndex, - TypeAlias, - TypeVar, final, overload, type_check_only, ) -from typing_extensions import CapsuleType +from typing_extensions import CapsuleType, TypeVar import numpy as np -from numpy import ( # type: ignore[attr-defined] # Python >=3.12 - _AnyShapeT, +from numpy import ( _CastingKind, _CopyMode, _ModeKind, @@ 
-27,27 +27,22 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 _NDIterFlagsOp, _OrderCF, _OrderKACF, - _SupportsBuffer, _SupportsFileMethods, broadcast, - busdaycalendar, complexfloating, correlate, count_nonzero, datetime64, dtype, einsum as c_einsum, - flatiter, float64, floating, from_dlpack, - generic, int_, interp, intp, matmul, ndarray, - nditer, signedinteger, str_, timedelta64, @@ -67,11 +62,13 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeDT64_co, _ArrayLikeFloat_co, + _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeStr_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _DT64Codes, _DTypeLike, _FloatLike_co, _IntLike_co, @@ -183,22 +180,21 @@ __all__ = [ "zeros", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=ndarray) -_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -# TODO: fix the names of these typevars -_ReturnType = TypeVar("_ReturnType") -_IDType = TypeVar("_IDType") -_Nin = TypeVar("_Nin", bound=int) -_Nout = TypeVar("_Nout", bound=int) +_ArrayT_co = TypeVar("_ArrayT_co", bound=np.ndarray, default=np.ndarray, covariant=True) -_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] -_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = ndarray[tuple[int, int], dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = ndarray[tuple[int, int, int], dtype[ScalarT]] +# workaround for mypy's and pyright's typing spec non-compliance regarding overloads +type _ArrayJustND[ScalarT: np.generic] = ndarray[tuple[Never, Never, Never, Never], dtype[ScalarT]] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _ToArray2D[ScalarT: np.generic] = 
_Array2D[ScalarT] | Sequence[Sequence[ScalarT]] +type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Sequence[ScalarT]]] # Valid time units -_UnitKind: TypeAlias = L[ +type _UnitKind = L[ "Y", "M", "D", @@ -212,7 +208,7 @@ _UnitKind: TypeAlias = L[ "fs", "as", ] -_RollKind: TypeAlias = L[ # `raise` is deliberately excluded +type _RollKind = L[ # `raise` is deliberately excluded "nat", "forward", "following", @@ -222,146 +218,21 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded "modifiedpreceding", ] -@type_check_only -class _SupportsArray(Protocol[_ArrayT_co]): - def __array__(self, /) -> _ArrayT_co: ... +type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 +type _InnerScalar = np.number | np.bool | np.timedelta64 +type _DotScalar = np.number | np.bool -@type_check_only -class _ConstructorEmpty(Protocol): - # 1-D shape - @overload - def __call__( - self, - /, - shape: SupportsIndex, - dtype: None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array1D[float64]: ... - @overload - def __call__( - self, - /, - shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> ndarray[tuple[int], _DTypeT]: ... - @overload - def __call__( - self, - /, - shape: SupportsIndex, - dtype: type[_ScalarT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array1D[_ScalarT]: ... - @overload - def __call__( - self, - /, - shape: SupportsIndex, - dtype: DTypeLike | None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array1D[Incomplete]: ... 
+# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here +type _ToDates = dt.date | _NestedSequence[dt.date] +type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] - # known shape - @overload - def __call__( - self, - /, - shape: _AnyShapeT, - dtype: None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, float64]: ... - @overload - def __call__( - self, - /, - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT]: ... - @overload - def __call__( - self, - /, - shape: _AnyShapeT, - dtype: type[_ScalarT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, _ScalarT]: ... - @overload - def __call__( - self, - /, - shape: _AnyShapeT, - dtype: DTypeLike | None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, Incomplete]: ... +type _BitOrder = L["big", "little"] +type _MaxWork = L[-1, 0] - # unknown shape - @overload - def __call__( - self, /, - shape: _ShapeLike, - dtype: None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> NDArray[float64]: ... - @overload - def __call__( - self, /, - shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShape, _DTypeT]: ... - @overload - def __call__( - self, /, - shape: _ShapeLike, - dtype: type[_ScalarT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> NDArray[_ScalarT]: ... 
- @overload - def __call__( - self, - /, - shape: _ShapeLike, - dtype: DTypeLike | None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> NDArray[Incomplete]: ... +@type_check_only +class _SupportsArray[ArrayT_co: np.ndarray](Protocol): + def __array__(self, /) -> ArrayT_co: ... # using `Final` or `TypeAlias` will break stubtest error = Exception @@ -391,7 +262,7 @@ set_datetimeparse_function: Final[Callable[..., object]] = ... def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... -def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def scalar[DTypeT: np.dtype](dtype: DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], DTypeT]: ... def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... @@ -406,12 +277,230 @@ MAY_SHARE_BOUNDS: Final = 0 MAY_SHARE_EXACT: Final = -1 tracemalloc_domain: Final = 389_047 -zeros: Final[_ConstructorEmpty] = ... -empty: Final[_ConstructorEmpty] = ... +# keep in sync with zeros (below) and ones (`_core/numeric.pyi`) +@overload # 1d, float64 default +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def empty[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... 
+@overload # 1d, specific scalar type +def empty[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... 
+@overload # unknown shape, specific dtype +def empty[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def empty[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # unknown shape, unknown dtype +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... +# keep in sync with empty (above) and ones (`_core/numeric.pyi`) +@overload # 1d, float64 default +def zeros( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def zeros[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def zeros[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def zeros( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... 
+@overload # known shape, float64 default +def zeros[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def zeros[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def zeros[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def zeros[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def zeros( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... +@overload # unknown shape, specific dtype +def zeros[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def zeros[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... 
+@overload # unknown shape, unknown dtype +def zeros( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... + +# @overload -def empty_like( - prototype: _ArrayT, +def empty_like[ArrayT: np.ndarray]( + prototype: ArrayT, /, dtype: None = None, order: _OrderKACF = "K", @@ -419,10 +508,10 @@ def empty_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def empty_like( - prototype: _ArrayLike[_ScalarT], +def empty_like[ScalarT: np.generic]( + prototype: _ArrayLike[ScalarT], /, dtype: None = None, order: _OrderKACF = "K", @@ -430,18 +519,18 @@ def empty_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def empty_like( +def empty_like[ScalarT: np.generic]( prototype: Incomplete, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def empty_like( prototype: Incomplete, @@ -455,8 +544,8 @@ def empty_like( ) -> NDArray[Incomplete]: ... @overload -def array( - object: _ArrayT, +def array[ArrayT: np.ndarray]( + object: ArrayT, dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -465,10 +554,10 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def array( - object: _SupportsArray[_ArrayT], +def array[ArrayT: np.ndarray]( + object: _SupportsArray[ArrayT], dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -477,10 +566,10 @@ def array( ndmin: L[0] = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def array( - object: _ArrayLike[_ScalarT], +def array[ScalarT: np.generic]( + object: _ArrayLike[ScalarT], dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -489,11 +578,11 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( object: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, copy: bool | _CopyMode | None = True, order: _OrderKACF = "K", @@ -501,7 +590,7 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def array( object: Any, @@ -542,25 +631,25 @@ def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> # NOTE: Allow any sequence of array-like objects @overload -def concatenate( - arrays: _ArrayLike[_ScalarT], +def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, casting: _CastingKind | None = "same_kind", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def concatenate( +def concatenate[ScalarT: np.generic]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind | None = "same_kind", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], @@ -572,33 +661,127 @@ def concatenate( casting: _CastingKind | None = "same_kind", ) -> NDArray[Incomplete]: ... @overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... 
@overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... -def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... +# keep in sync with `ma.core.inner` +@overload # (?d T, Nd T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayJustND[ScalarT], b: _ArrayLike[ScalarT], /) -> NDArray[ScalarT] | Any: ... +@overload # (Nd T, ?d T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayLike[ScalarT], b: _ArrayJustND[ScalarT], /) -> NDArray[ScalarT] | Any: ... +@overload # (1d T, 1d T) -> 0d T +def inner[ScalarT: _InnerScalar](a: _ToArray1D[ScalarT], b: _ToArray1D[ScalarT], /) -> ScalarT: ... +@overload # (1d object_, 1d _) -> 0d object +def inner(a: _Array1D[np.object_], b: _Array1D[np.object_] | _ToArray1D[_InnerScalar], /) -> Any: ... +@overload # (1d _, 1d object_) -> 0d object +def inner(a: _ToArray1D[_InnerScalar], b: _Array1D[np.object_], /) -> Any: ... +@overload # (1d bool, 1d bool) -> bool_ +def inner(a: Sequence[bool], b: Sequence[bool], /) -> np.bool: ... +@overload # (1d ~int, 1d +int) -> int_ +def inner(a: list[int], b: Sequence[int], /) -> np.int_: ... +@overload # (1d +int, 1d ~int) -> int_ +def inner(a: Sequence[int], b: list[int], /) -> np.int_: ... +@overload # (1d ~float, 1d +float) -> float64 +def inner(a: list[float], b: Sequence[float], /) -> np.float64: ... +@overload # (1d +float, 1d ~float) -> float64 +def inner(a: Sequence[float], b: list[float], /) -> np.float64: ... +@overload # (1d ~complex, 1d +complex) -> complex128 +def inner(a: list[complex], b: Sequence[complex], /) -> np.complex128: ... +@overload # (1d +complex, 1d ~complex) -> complex128 +def inner(a: Sequence[complex], b: list[complex], /) -> np.complex128: ... 
+@overload # (1d T, 2d T) -> 1d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray1D[ScalarT], b: _Array2D[ScalarT], /) -> _Array1D[ScalarT]: ... +@overload # (2d T, 1d T) -> 1d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array1D[ScalarT], /) -> _Array1D[ScalarT]: ... +@overload # (2d T, 2d T) -> 2d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array2D[ScalarT], /) -> _Array2D[ScalarT]: ... +@overload # fallback +def inner(a: ArrayLike, b: ArrayLike, /) -> Any: ... +# keep in sync with `ma.core.dot` +@overload # (?d _, Nd _) -> 0d|Nd _ (workaround) +def dot(a: _ArrayJustND[_DotScalar | np.object_], b: _ArrayLike[_DotScalar | np.object_], out: None = None) -> Any: ... +@overload # (Nd _, ?d _) -> 0d|Nd _ (workaround) +def dot(a: _ArrayLike[_DotScalar | np.object_], b: _ArrayJustND[_DotScalar | np.object_], out: None = None) -> Any: ... +@overload # (1d T, 1d T) -> 0d T +def dot[ScalarT: _DotScalar](a: _ToArray1D[ScalarT], b: _ToArray1D[ScalarT], out: None = None) -> ScalarT: ... +@overload # (1d object_, 1d _) -> 0d object +def dot(a: _Array1D[np.object_], b: _Array1D[np.object_] | _ToArray1D[_DotScalar], out: None = None) -> Any: ... +@overload # (1d _, 1d object_) -> 0d object +def dot(a: _ToArray1D[_DotScalar], b: _Array1D[np.object_], out: None = None) -> Any: ... +@overload # (1d bool, 1d bool) -> bool_ +def dot(a: Sequence[bool], b: Sequence[bool], out: None = None) -> np.bool: ... +@overload # (1d ~int, 1d +int) -> int_ +def dot(a: list[int], b: Sequence[int], out: None = None) -> np.int_: ... +@overload # (1d +int, 1d ~int) -> int_ +def dot(a: Sequence[int], b: list[int], out: None = None) -> np.int_: ... +@overload # (1d ~float, 1d +float) -> float64 +def dot(a: list[float], b: Sequence[float], out: None = None) -> np.float64: ... +@overload # (1d +float, 1d ~float) -> float64 +def dot(a: Sequence[float], b: list[float], out: None = None) -> np.float64: ... 
+@overload # (1d ~complex, 1d +complex) -> complex128
+def dot(a: list[complex], b: Sequence[complex], out: None = None) -> np.complex128: ...
+@overload # (1d +complex, 1d ~complex) -> complex128
+def dot(a: Sequence[complex], b: list[complex], out: None = None) -> np.complex128: ...
+@overload # (1d T, 2d T) -> 1d T
+def dot[ScalarT: _DotScalar | np.object_](
+    a: _ToArray1D[ScalarT], b: _ToArray2D[ScalarT], out: None = None
+) -> _Array1D[ScalarT]: ...
+@overload # (2d T, 1d T) -> 1d T
+def dot[ScalarT: _DotScalar | np.object_](
+    a: _ToArray2D[ScalarT], b: _ToArray1D[ScalarT], out: None = None
+) -> _Array1D[ScalarT]: ...
+@overload # (2d T, 2d T) -> 2d T
+def dot[ScalarT: _DotScalar | np.object_](
+    a: _ToArray2D[ScalarT], b: _ToArray2D[ScalarT], out: None = None
+) -> _Array2D[ScalarT]: ...
+@overload # (2d T, ?d T) -> >=1d T
+def dot[ScalarT: _DotScalar | np.object_](
+    a: _ToArray2D[ScalarT], b: _ArrayLike[ScalarT], out: None = None
+) -> NDArray[ScalarT]: ...
+@overload # (?d T, 2d T) -> >=1d T
+def dot[ScalarT: _DotScalar | np.object_](
+    a: _ArrayLike[ScalarT], b: _ToArray2D[ScalarT], out: None = None
+) -> NDArray[ScalarT]: ...
 @overload
-def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ...
+def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ...
+@overload
+def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ...
+
+# keep in sync with `ma.core.where` and the 1-arg overloads with `_core.fromnumeric.nonzero`
+@overload # (?d) (workaround)
+def where(condition: _ArrayJustND[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp], ...]: ...
+@overload # (1d)
+def where(condition: _ToArray1D[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp]]: ...
+@overload # (2d)
+def where(condition: _ToArray2D[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp], _Array1D[np.intp]]: ...
+@overload # (3d) +def where( + condition: _ToArray3D[Any], x: None = None, y: None = None, / +) -> tuple[_Array1D[np.intp], _Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # (Nd) (fallback) +def where(condition: _ArrayLike[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp], ...]: ... @overload def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... +# def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... @@ -606,11 +789,6 @@ def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind def min_scalar_type(a: ArrayLike, /) -> dtype: ... def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... -@overload -def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... -@overload -def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... - @overload def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @overload @@ -628,15 +806,15 @@ def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... -def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... +# +def bincount(x: _ArrayLikeInt_co, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> _Array1D[intp]: ... +# def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -_BitOrder: TypeAlias = L["big", "little"] - @overload -def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> _Array1D[uint8]: ... 
@overload def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... @@ -647,7 +825,7 @@ def unpackbits( axis: None = None, count: SupportsIndex | None = None, bitorder: _BitOrder = "big", -) -> ndarray[tuple[int], dtype[uint8]]: ... +) -> _Array1D[uint8]: ... @overload def unpackbits( a: _ArrayLike[uint8], @@ -657,73 +835,223 @@ def unpackbits( bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -_MaxWork: TypeAlias = L[-1, 0] - # any two python objects will be accepted, not just `ndarray`s def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... -@overload +# +@overload # ndarray +def asarray[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # ndarray, dtype: +def asarray[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # ndarray, dtype: +def asarray[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + dtype: DTypeLike, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT]: ... +@overload # 1d bool def asarray( - a: _ArrayLike[_ScalarT], + a: Sequence[bool], dtype: None = None, - order: _OrderKACF = ..., + order: _OrderKACF = None, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... 
-@overload + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.bool]: ... +@overload # 1d ~int def asarray( - a: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., + a: list[int], + dtype: None = None, + order: _OrderKACF = None, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... -@overload + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # 1d ~float def asarray( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., + a: list[float], + dtype: None = None, + order: _OrderKACF = None, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d ~complex +def asarray( + a: list[complex], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.complex128]: ... +@overload # 1d _, dtype: +def asarray[ScalarT: np.generic]( + a: Sequence[complex | np.generic], + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d _, dtype: +def asarray( + a: Sequence[complex | np.generic], + dtype: DTypeLike, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... 
+@overload # 2d bool +def asarray( + a: Sequence[Sequence[bool]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.bool]: ... +@overload # 2d ~int +def asarray( + a: Sequence[list[int]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.int_]: ... +@overload # 2d ~float +def asarray( + a: Sequence[list[float]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.float64]: ... +@overload # 2d ~complex +def asarray( + a: Sequence[list[complex]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 2d _, dtype: +def asarray[ScalarT: np.generic]( + a: Sequence[Sequence[complex | np.generic]], + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[ScalarT]: ... +@overload # 2d _, dtype: +def asarray( + a: Sequence[Sequence[complex | np.generic]], + dtype: DTypeLike, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[Any]: ... +@overload # known array-like +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... 
+@overload # array-like, dtype: +def asarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # fallback +def asarray( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# @overload -def asanyarray( - a: _ArrayT, # Preserve subclass-information +def asanyarray[ArrayT: np.ndarray]( + a: ArrayT, # Preserve subclass-information dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def asanyarray( - a: _ArrayLike[_ScalarT], +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asanyarray( +def asanyarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asanyarray( a: Any, @@ -735,50 +1063,67 @@ def asanyarray( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... +# @overload -def ascontiguousarray( - a: _ArrayLike[_ScalarT], +def ascontiguousarray[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... 
@overload -def ascontiguousarray( - a: Any, - dtype: _DTypeLike[_ScalarT], +def ascontiguousarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload +def ascontiguousarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + *, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload def ascontiguousarray( - a: Any, - dtype: DTypeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# @overload -def asfortranarray( - a: _ArrayLike[_ScalarT], +def asfortranarray[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def asfortranarray( - a: Any, - dtype: _DTypeLike[_ScalarT], +def asfortranarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload +def asfortranarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + *, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload def asfortranarray( - a: Any, - dtype: DTypeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... # `sep` is a de facto mandatory argument, as its default value is deprecated @@ -786,94 +1131,95 @@ def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... 
def fromstring( string: str | bytes, dtype: None = None, - count: SupportsIndex = ..., + count: SupportsIndex = -1, *, sep: str, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[float64]: ... @overload -def fromstring( +def fromstring[ScalarT: np.generic]( string: str | bytes, - dtype: _DTypeLike[_ScalarT], - count: SupportsIndex = ..., + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, *, sep: str, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... @overload def fromstring( string: str | bytes, - dtype: DTypeLike | None = ..., - count: SupportsIndex = ..., + dtype: DTypeLike | None = None, + count: SupportsIndex = -1, *, sep: str, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... +# @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +) -> _PyFunc_Nin1_Nout1[ReturnT, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin1_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... +) -> _PyFunc_Nin2_Nout1[ReturnT, None]: ... 
@overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin2_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +) -> _PyFunc_Nin3P_Nout1[ReturnT, None, NInT]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int, IdentityT]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... + identity: IdentityT, +) -> _PyFunc_Nin3P_Nout1[ReturnT, IdentityT, NInT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, identity: None = None, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +) -> _PyFunc_Nin1P_Nout2P[ReturnT, None, NInT, NOutT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int, IdentityT]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, - identity: _IDType, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... + identity: IdentityT, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, IdentityT, NInT, NOutT]: ... 
@overload def frompyfunc( func: Callable[..., Any], /, @@ -892,17 +1238,17 @@ def fromfile( offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... +) -> _Array1D[float64]: ... @overload -def fromfile( +def fromfile[ScalarT: np.generic]( file: StrOrBytesPath | _SupportsFileMethods, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, @@ -912,69 +1258,108 @@ def fromfile( offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... +) -> _Array1D[Any]: ... -@overload +# +@overload # dtype= +def fromiter[ScalarT: np.generic]( + iter: Iterable[object], + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # dtype=None def fromiter( - iter: Iterable[Any], - dtype: _DTypeLike[_ScalarT], - count: SupportsIndex = ..., + iter: Iterable[object], + dtype: None, + count: SupportsIndex = -1, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... -@overload + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # dtype=bool def fromiter( - iter: Iterable[Any], - dtype: DTypeLike | None, - count: SupportsIndex = ..., + iter: Iterable[object], + dtype: type[bool], + count: SupportsIndex = -1, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.bool]: ... +@overload # dtype=int +def fromiter( + iter: Iterable[object], + dtype: type[int], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_ | Any]: ... 
+@overload # dtype=float +def fromiter( + iter: Iterable[object], + dtype: type[float], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # dtype=complex +def fromiter( + iter: Iterable[object], + dtype: type[complex], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.complex128 | Any]: ... +@overload # dtype= +def fromiter( + iter: Iterable[object], + dtype: DTypeLike, + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... +# @overload def frombuffer( - buffer: _SupportsBuffer, + buffer: Buffer, dtype: None = None, - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[float64]: ... @overload -def frombuffer( - buffer: _SupportsBuffer, - dtype: _DTypeLike[_ScalarT], - count: SupportsIndex = ..., - offset: SupportsIndex = ..., +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... @overload def frombuffer( - buffer: _SupportsBuffer, - dtype: DTypeLike | None = ..., - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + buffer: Buffer, + dtype: DTypeLike | None = None, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... - -_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 -_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... 
# keep in sync with ma.core.arange # NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads @overload # dtype= -def arange( +def arange[ScalarT: _ArangeScalar]( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, step: _ArangeScalar | float | None = 1, *, - dtype: _DTypeLike[_ArangeScalarT], + dtype: _DTypeLike[ScalarT], device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array1D[_ArangeScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload # (int-like, int-like?, int-like?) def arange( start_or_stop: _IntLike_co, @@ -1041,11 +1426,22 @@ def arange( device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, ) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... @overload # dtype= def arange( - start_or_stop: _ArangeScalar | float, + start_or_stop: _ArangeScalar | float | str, /, - stop: _ArangeScalar | float | None = None, + stop: _ArangeScalar | float | str | None = None, step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, @@ -1056,12 +1452,23 @@ def arange( # def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... 
-# The datetime functions perform unsafe casts to `datetime64[D]`, -# so a lot of different argument types are allowed here +# +@final +class busdaycalendar: + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] -_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] -_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] + def __init__( + self, + /, + weekmask: str | Sequence[_IntLike_co] | _SupportsArray[NDArray[np.bool | np.integer]] = "1111100", + holidays: Sequence[dt.date | np.datetime64[dt.date]] | _SupportsArray[NDArray[np.datetime64[dt.date]]] | None = None, + ) -> None: ... + @property + def weekmask(self) -> _Array1D[np.bool]: ... + @property + def holidays(self) -> _Array1D[np.datetime64[dt.date]]: ... +# @overload def busday_count( begindates: _ScalarLike_co | dt.date, @@ -1081,24 +1488,24 @@ def busday_count( out: None = None, ) -> NDArray[int_]: ... @overload -def busday_count( +def busday_count[OutT: np.ndarray]( begindates: ArrayLike | _ToDates, enddates: ArrayLike | _ToDates, weekmask: ArrayLike = "1111100", holidays: ArrayLike | _ToDates = (), busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_count( +def busday_count[OutT: np.ndarray]( begindates: ArrayLike | _ToDates, enddates: ArrayLike | _ToDates, weekmask: ArrayLike, holidays: ArrayLike | _ToDates, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload @@ -1122,7 +1529,7 @@ def busday_offset( out: None = None, ) -> NDArray[datetime64]: ... 
@overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: _ArrayLike[datetime64] | _ToDates, offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"] = "raise", @@ -1130,18 +1537,18 @@ def busday_offset( holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: _ArrayLike[datetime64] | _ToDates, offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"], weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload def busday_offset( dates: _ScalarLike_co | dt.date, @@ -1163,7 +1570,7 @@ def busday_offset( out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: ArrayLike | _ToDates, offsets: ArrayLike | _ToDeltas, roll: _RollKind, @@ -1171,18 +1578,18 @@ def busday_offset( holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: ArrayLike | _ToDates, offsets: ArrayLike | _ToDeltas, roll: _RollKind, weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload def is_busday( @@ -1201,24 +1608,24 @@ def is_busday( out: None = None, ) -> NDArray[np.bool]: ... @overload -def is_busday( +def is_busday[OutT: np.ndarray]( dates: ArrayLike | _ToDates, weekmask: ArrayLike = "1111100", holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... 
@overload -def is_busday( +def is_busday[OutT: np.ndarray]( dates: ArrayLike | _ToDates, weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... -_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo +type _TimezoneContext = L["naive", "UTC", "local"] | dt.tzinfo @overload def datetime_as_string( @@ -1252,7 +1659,7 @@ def compare_chararrays( def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... -_GetItemKeys: TypeAlias = L[ +type _GetItemKeys = L[ "C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", @@ -1265,7 +1672,7 @@ _GetItemKeys: TypeAlias = L[ "FNC", "FORC", ] -_SetItemKeys: TypeAlias = L[ +type _SetItemKeys = L[ "A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY", @@ -1301,8 +1708,165 @@ class flagsobj: def num(self) -> int: ... @property def owndata(self) -> bool: ... - def __getitem__(self, key: _GetItemKeys) -> bool: ... - def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + def __getitem__(self, key: _GetItemKeys, /) -> bool: ... + def __setitem__(self, key: _SetItemKeys, value: bool, /) -> None: ... + +@final +class flatiter(Generic[_ArrayT_co]): + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def base(self, /) -> _ArrayT_co: ... + @property + def coords[ShapeT: _Shape](self: flatiter[np.ndarray[ShapeT]], /) -> ShapeT: ... + @property + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__[ScalarT: np.generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... 
+ @overload # 0d; _[] + def __getitem__[ScalarT: np.generic](self: flatiter[NDArray[ScalarT]], key: int | np.integer, /) -> ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[np.integer]], + /, + ) -> ndarray[tuple[int], DTypeT]: ... + @overload # 2d; _[[*[*]]] + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], DTypeT]: ... + @overload # ?d + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: NDArray[np.integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy[DTypeT: dtype](self: flatiter[np.ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... 
+ +@final +class nditer: + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + + @overload + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + @overload + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + + # + def __enter__(self, /) -> nditer: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ... + + # + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Incomplete], ...]: ... + def __len__(self) -> int: ... + + # + @overload + def __getitem__(self, index: SupportsIndex, /) -> NDArray[Incomplete]: ... + @overload + def __getitem__(self, index: slice, /) -> tuple[NDArray[Incomplete], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike, /) -> None: ... + + # + def __copy__(self) -> Self: ... + def copy(self) -> Self: ... + + # + def close(self) -> None: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... 
+ + # + @property + def dtypes(self) -> tuple[np.dtype[Incomplete], ...]: ... + @property + def finished(self) -> bool: ... + @property + def has_delayed_bufalloc(self) -> bool: ... + @property + def has_index(self) -> bool: ... + @property + def has_multi_index(self) -> bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Incomplete], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Incomplete], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Incomplete], ...]: ... def nested_iters( op: ArrayLike | Sequence[ArrayLike], diff --git a/numpy/_core/npymath.ini.in b/numpy/_core/npymath.ini.in deleted file mode 100644 index a233b8f3bfa9..000000000000 --- a/numpy/_core/npymath.ini.in +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=@pkgname@ -prefix=${pkgdir} -libdir=${prefix}@sep@lib -includedir=${prefix}@sep@include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib -Cflags=/INCLUDE:${includedir} -Requires=mlib diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 11527c9de442..6bd03ae75c5d 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -151,7 +151,7 @@ def zeros_like( array([[0, 0, 0], [0, 0, 0]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.zeros_like(y) @@ -211,7 +211,7 @@ def ones(shape, dtype=None, order='C', *, device=None, 
like=None): >>> np.ones(5) array([1., 1., 1., 1., 1.]) - >>> np.ones((5,), dtype=int) + >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) @@ -300,7 +300,7 @@ def ones_like( array([[1, 1, 1], [1, 1, 1]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.ones_like(y) @@ -448,21 +448,21 @@ def full_like( Examples -------- >>> import numpy as np - >>> x = np.arange(6, dtype=int) + >>> x = np.arange(6, dtype=np.int_) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) + >>> np.full_like(x, 0.1, dtype=np.float64) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) + >>> np.full_like(x, np.nan, dtype=np.float64) array([nan, nan, nan, nan, nan, nan]) - >>> y = np.arange(6, dtype=np.double) + >>> y = np.arange(6, dtype=np.float64) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> y = np.zeros([2, 2, 3], dtype=int) + >>> y = np.zeros([2, 2, 3], dtype=np.int_) >>> np.full_like(y, [0, 0, 255]) array([[[ 0, 0, 255], [ 0, 0, 255]], @@ -485,15 +485,10 @@ def count_nonzero(a, axis=None, *, keepdims=False): """ Counts the number of non-zero values in the array ``a``. - The word "non-zero" is in reference to the Python 2.x - built-in method ``__nonzero__()`` (renamed ``__bool__()`` - in Python 3.x) of Python objects that tests an object's - "truthfulness". For example, any number is considered - truthful if it is nonzero, whereas any string is considered - truthful if it is not the empty string. Thus, this function - (recursively) counts how many elements in ``a`` (and in - sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` - method evaluated to ``True``. + A non-zero value is one that evaluates to truthful in a boolean + context, including any non-zero number and any string that + is not empty. 
This function recursively counts how many elements + in ``a`` (and its sub-arrays) are non-zero values. Parameters ---------- @@ -982,7 +977,7 @@ def outer(a, b, out=None): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1022,7 +1017,8 @@ def tensordot(a, b, axes=2): * (2,) array_like Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. Both elements array_like must be of the same length. - + Each axis may appear at most once; repeated axes are not allowed. + For example, ``axes=([1, 1], [0, 0])`` is invalid. Returns ------- output : ndarray @@ -1053,6 +1049,13 @@ def tensordot(a, b, axes=2): first in both sequences, the second axis second, and so forth. The calculation can be referred to ``numpy.einsum``. + For example, if ``a.shape == (2, 3, 4)`` and ``b.shape == (3, 4, 5)``, + then ``axes=([1, 2], [0, 1])`` sums over the ``(3, 4)`` dimensions of + both arrays and produces an output of shape ``(2, 5)``. + + Each summation axis corresponds to a distinct contraction index; repeating + an axis (for example ``axes=([1, 1], [0, 0])``) is invalid. + The shape of the result consists of the non-contracted axes of the first tensor, followed by the non-contracted axes of the second. 
@@ -1107,7 +1110,7 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=np.object_) >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], @@ -1170,6 +1173,11 @@ def tensordot(a, b, axes=2): axes_b = [axes_b] nb = 1 + if len(set(axes_a)) != len(axes_a): + raise ValueError("duplicate axes are not allowed in tensordot") + if len(set(axes_b)) != len(axes_b): + raise ValueError("duplicate axes are not allowed in tensordot") + a, b = asarray(a), asarray(b) as_ = a.shape nda = a.ndim @@ -1559,10 +1567,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. + must have 3 dimensions. Parameters ---------- @@ -1575,9 +1580,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): axisb : int, optional Axis of `b` that defines the vector(s). By default, the last axis. axisc : int, optional - Axis of `c` containing the cross product vector(s). Ignored if - both input vectors have dimension 2, as the return is scalar. - By default, the last axis. + Axis of `c` containing the cross product vector(s). By default, the last axis. axis : int, optional If defined, the axis of `a`, `b` and `c` that defines the vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`. 
@@ -1590,27 +1593,19 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Raises ------ ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. + When the dimension of the vector(s) in `a` or `b` does not equal 3. See Also -------- inner : Inner product outer : Outer product. - linalg.cross : An Array API compatible variation of ``np.cross``, - which accepts (arrays of) 3-element vectors only. + linalg.cross : An Array API compatible variation of ``np.cross``. ix_ : Construct index arrays. Notes ----- Supports full broadcasting of the inputs. - Dimension-2 input arrays were deprecated in 2.0.0. If you do need this - functionality, you can use:: - - def cross2d(x, y): - return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] - Examples -------- Vector cross-product. @@ -1623,13 +1618,6 @@ def cross2d(x, y): One vector with dimension 2. - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - >>> x = [1, 2, 0] >>> y = [4, 5, 6] >>> np.cross(x, y) @@ -1637,10 +1625,10 @@ def cross2d(x, y): Both vectors with dimension 2. - >>> x = [1,2] - >>> y = [4,5] + >>> x = [1, 2, 0] + >>> y = [4, 5, 0] >>> np.cross(x, y) - array(-3) + array([0, 0, -3]) Multiple vector cross-products. Note that the direction of the cross product vector is defined by the *right-hand rule*. @@ -1687,24 +1675,16 @@ def cross2d(x, y): # Move working axis to the end of the shape a = moveaxis(a, axisa, -1) b = moveaxis(b, axisb, -1) - msg = ("incompatible dimensions for cross product\n" - "(dimension must be 2 or 3)") - if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): - raise ValueError(msg) - if a.shape[-1] == 2 or b.shape[-1] == 2: - # Deprecated in NumPy 2.0, 2023-09-26 - warnings.warn( - "Arrays of 2-dimensional vectors are deprecated. Use arrays of " - "3-dimensional vectors instead. 
(deprecated in NumPy 2.0)", - DeprecationWarning, stacklevel=2 + if a.shape[-1] != 3 or b.shape[-1] != 3: + raise ValueError( + f"Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {a.shape[-1]} and {b.shape[-1]} dimensional instead." ) # Create the output array - shape = broadcast(a[..., 0], b[..., 0]).shape - if a.shape[-1] == 3 or b.shape[-1] == 3: - shape += (3,) - # Check axisc is within bounds - axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + shape = *broadcast(a[..., 0], b[..., 0]).shape, 3 + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') dtype = promote_types(a.dtype, b.dtype) cp = empty(shape, dtype) @@ -1715,58 +1695,26 @@ def cross2d(x, y): # create local aliases for readability a0 = a[..., 0] a1 = a[..., 1] - if a.shape[-1] == 3: - a2 = a[..., 2] + a2 = a[..., 2] b0 = b[..., 0] b1 = b[..., 1] - if b.shape[-1] == 3: - b2 = b[..., 2] - if cp.ndim != 0 and cp.shape[-1] == 3: - cp0 = cp[..., 0] - cp1 = cp[..., 1] - cp2 = cp[..., 2] - - if a.shape[-1] == 2: - if b.shape[-1] == 2: - # a0 * b1 - a1 * b0 - multiply(a0, b1, out=cp) - cp -= a1 * b0 - return cp - else: - assert b.shape[-1] == 3 - # cp0 = a1 * b2 - 0 (a2 = 0) - # cp1 = 0 - a0 * b2 (a2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - multiply(a0, b2, out=cp1) - negative(cp1, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - else: - assert a.shape[-1] == 3 - if b.shape[-1] == 3: - # cp0 = a1 * b2 - a2 * b1 - # cp1 = a2 * b0 - a0 * b2 - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - tmp = np.multiply(a2, b1, out=...) 
- cp0 -= tmp - multiply(a2, b0, out=cp1) - multiply(a0, b2, out=tmp) - cp1 -= tmp - multiply(a0, b1, out=cp2) - multiply(a1, b0, out=tmp) - cp2 -= tmp - else: - assert b.shape[-1] == 2 - # cp0 = 0 - a2 * b1 (b2 = 0) - # cp1 = a2 * b0 - 0 (b2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a2, b1, out=cp0) - negative(cp0, out=cp0) - multiply(a2, b0, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 + b2 = b[..., 2] + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = np.multiply(a2, b1, out=...) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp return moveaxis(cp, -1, axisc) @@ -1922,20 +1870,20 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- >>> import numpy as np - >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64) array([[0., 0.], [1., 1.]]) - >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=np.float64) array([[0., 1.], [0., 1.]]) - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_) array([[ True, False, False], [False, True, False], [False, False, True]]) - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) @@ -2057,7 +2005,7 @@ def binary_repr(num, width=None): In a two's-complement system negative numbers are represented by the two's complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement + representing signed integers on computers [1]_. 
An N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. @@ -2596,13 +2544,13 @@ def array_equal(a1, a2, equal_nan=False): if cannot_have_nan: return builtins.bool(asarray(a1 == a2).all()) - # Handling NaN values if equal_nan is True - a1nan, a2nan = isnan(a1), isnan(a2) - # NaN's occur at different locations - if not (a1nan == a2nan).all(): - return False - # Shapes of a1, a2 and masks are guaranteed to be consistent by this point - return builtins.bool((a1[~a1nan] == a2[~a1nan]).all()) + # Fast path for a1 and a2 being all NaN arrays + a1nan = isnan(a1) + if a1nan.all(): + return builtins.bool(isnan(a2).all()) + + equal_or_both_nan = (a1 == a2) | (a1nan & isnan(a2)) + return builtins.bool(equal_or_both_nan.all()) def _array_equiv_dispatcher(a1, a2): diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 4ad2881cf17e..1dd34d6a4fcd 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -3,11 +3,9 @@ from builtins import bool as py_bool from collections.abc import Callable, Iterable, Sequence from typing import ( Any, - Final, Literal as L, SupportsAbs, SupportsIndex, - TypeAlias, TypeGuard, TypeVar, overload, @@ -30,6 +28,7 @@ from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, @@ -37,13 +36,11 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _CDoubleCodes, _Complex128Codes, - _DoubleCodes, _DTypeLike, _DTypeLikeBool, _Float64Codes, - _IntCodes, + _IntPCodes, _NestedSequence, _NumberLike_co, _ScalarLike_co, @@ -130,7 +127,6 @@ from .multiarray import ( RAISE as RAISE, WRAP as WRAP, _Array, - _ConstructorEmpty, arange, array, asanyarray, @@ -631,23 +627,6 @@ __all__ = [ "zeros_like", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) -_NumericScalarT = TypeVar("_NumericScalarT", 
bound=number | timedelta64 | object_) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], - tuple[int], - tuple[int, int], - tuple[int, int, int], - tuple[int, int, int, int], - tuple[int, ...], -) _AnyNumericScalarT = TypeVar( "_AnyNumericScalarT", np.int8, np.int16, np.int32, np.int64, @@ -658,63 +637,63 @@ _AnyNumericScalarT = TypeVar( np.object_, ) -_CorrelateMode: TypeAlias = L["valid", "same", "full"] +type _CorrelateMode = L["valid", "same", "full"] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] -_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _Array4D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int, int], np.dtype[ScalarT]] -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool -_TD64_co: TypeAlias = np.timedelta64 | _Int_co +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool +type _TD64_co = np.timedelta64 | _Int_co -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike1DNumber_co: 
TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] -_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] -_ArrayLike1DObject_co: TypeAlias = _ArrayLike1D[np.object_] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] +type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] -_DTypeLikeInt: TypeAlias = type[int] | _IntCodes -_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes -_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes +type _DTypeLikeInt = type[int] | _IntPCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes ### # keep in sync with `ones_like` @overload -def zeros_like( - a: _ArrayT, +def zeros_like[ArrayT: np.ndarray]( + a: ArrayT, dtype: None = None, order: _OrderKACF = "K", subok: L[True] = True, shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def zeros_like( - a: _ArrayLike[_ScalarT], +def zeros_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def zeros_like( +def zeros_like[ScalarT: np.generic]( a: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def zeros_like( a: object, @@ -726,39 +705,147 @@ def zeros_like( device: L["cpu"] | None = None, ) -> NDArray[Any]: ... -ones: Final[_ConstructorEmpty] +# keep in sync with empty and zeros (`_core/multiarray.pyi`) +@overload # 1d, float64 default +def ones( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def ones[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def ones[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def ones( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def ones[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... 
+@overload # known shape, specific dtype +def ones[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def ones[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def ones[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def ones( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... +@overload # unknown shape, specific dtype +def ones[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def ones[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # unknown shape, unknown dtype +def ones( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... 
# keep in sync with `zeros_like` @overload -def ones_like( - a: _ArrayT, +def ones_like[ArrayT: np.ndarray]( + a: ArrayT, dtype: None = None, order: _OrderKACF = "K", subok: L[True] = True, shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ones_like( - a: _ArrayLike[_ScalarT], +def ones_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def ones_like( +def ones_like[ScalarT: np.generic]( a: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def ones_like( a: object, @@ -773,35 +860,35 @@ def ones_like( # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview # 1-D shape @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, - fill_value: _ScalarT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[tuple[int], _ScalarT]: ... +) -> _Array[tuple[int], ScalarT]: ... @overload -def full( +def full[DTypeT: np.dtype]( shape: SupportsIndex, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[tuple[int], _DTypeT]: ... +) -> np.ndarray[tuple[int], DTypeT]: ... 
@overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[tuple[int], _ScalarT]: ... +) -> _Array[tuple[int], ScalarT]: ... @overload def full( shape: SupportsIndex, @@ -814,76 +901,76 @@ def full( ) -> _Array[tuple[int], Any]: ... # known shape @overload -def full( - shape: _AnyShapeT, - fill_value: _ScalarT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, _ScalarT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[_AnyShapeT, _DTypeT]: ... +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, _ScalarT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape]( + shape: ShapeT, fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, Any]: ... +) -> _Array[ShapeT, Any]: ... 
# unknown shape @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, - fill_value: _ScalarT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def full( +def full[DTypeT: np.dtype]( shape: _ShapeLike, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[Any, _DTypeT]: ... +) -> np.ndarray[Any, DTypeT]: ... @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def full( shape: _ShapeLike, @@ -896,8 +983,8 @@ def full( ) -> NDArray[Any]: ... @overload -def full_like( - a: _ArrayT, +def full_like[ArrayT: np.ndarray]( + a: ArrayT, fill_value: object, dtype: None = None, order: _OrderKACF = "K", @@ -905,10 +992,10 @@ def full_like( shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def full_like( - a: _ArrayLike[_ScalarT], +def full_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], fill_value: object, dtype: None = None, order: _OrderKACF = "K", @@ -916,18 +1003,18 @@ def full_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def full_like( +def full_like[ScalarT: np.generic]( a: object, fill_value: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def full_like( a: object, @@ -959,9 +1046,11 @@ def isfortran(a: ndarray | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... -# keep in sync with `convolve` +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# keep in sync with `convolve` and `ma.core.correlate` @overload -def correlate( +def correlate( # noqa: UP047 a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" ) -> _Array1D[_AnyNumericScalarT]: ... @overload @@ -981,7 +1070,7 @@ def correlate( # keep in sync with `correlate` @overload -def convolve( +def convolve( # noqa: UP047 a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" ) -> _Array1D[_AnyNumericScalarT]: ... @overload @@ -999,9 +1088,10 @@ def convolve( a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" ) -> _Array1D[np.timedelta64 | Any]: ... -# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload +# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload, +# and also keep in sync with `ma.core.outer` (minus `out`) @overload -def outer( +def outer( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None ) -> _Array2D[_AnyNumericScalarT]: ... @overload @@ -1015,11 +1105,11 @@ def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> @overload def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... @overload -def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... 
+def outer[ArrayT: np.ndarray](a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: ArrayT) -> ArrayT: ... # keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload -def tensordot( +def tensordot( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[_AnyNumericScalarT]: ... @overload @@ -1039,7 +1129,7 @@ def tensordot( # @overload -def cross( +def cross( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axisa: int = -1, @@ -1077,15 +1167,15 @@ def cross( # @overload -def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ... +def roll[ArrayT: np.ndarray](a: ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> ArrayT: ... @overload -def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def roll[ScalarT: np.generic](a: _ArrayLike[ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # -def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... -def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... +def rollaxis[ArrayT: np.ndarray](a: ArrayT, axis: int, start: int = 0) -> ArrayT: ... +def moveaxis[ArrayT: np.ndarray](a: ArrayT, source: _ShapeLike, destination: _ShapeLike) -> ArrayT: ... def normalize_axis_tuple( axis: int | Iterable[int], ndim: int, @@ -1099,7 +1189,7 @@ def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = Fa @overload # 0d, dtype=, sparse=True def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... 
@overload # 0d, dtype=, sparse=False (default) -def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[()], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array1D[ScalarT]: ... @overload # 0d, dtype=, sparse=False (default) def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... @overload # 1d, dtype=int (default), sparse=False (default) @@ -1107,9 +1197,9 @@ def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = F @overload # 1d, dtype=int (default), sparse=True def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... @overload # 1d, dtype=, sparse=False (default) -def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array2D[ScalarT]: ... @overload # 1d, dtype=, sparse=True -def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[_Array1D[ScalarT]]: ... @overload # 1d, dtype=, sparse=False (default) def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... @overload # 1d, dtype=, sparse=True @@ -1121,11 +1211,11 @@ def indices( dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] ) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... @overload # 2d, dtype=, sparse=False (default) -def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ... 
+def indices[ScalarT: np.generic](dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array3D[ScalarT]: ... @overload # 2d, dtype=, sparse=True -def indices( - dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True] -) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ... +def indices[ScalarT: np.generic]( + dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[True] +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... @overload # 2d, dtype=, sparse=False (default) def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... @overload # 2d, dtype=, sparse=True @@ -1135,23 +1225,23 @@ def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] @overload # ?d, dtype=int (default), sparse=True def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... @overload # ?d, dtype=, sparse=False (default) -def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> NDArray[ScalarT]: ... @overload # ?d, dtype=, sparse=True -def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ... +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[NDArray[ScalarT], ...]: ... @overload # ?d, dtype=, sparse=False (default) def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... @overload # ?d, dtype=, sparse=True def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... 
# -def fromfunction( - function: Callable[..., _T], +def fromfunction[ReturnT]( + function: Callable[..., ReturnT], shape: Sequence[int], *, dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, **kwargs: object, -) -> _T: ... +) -> ReturnT: ... # def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... @@ -1164,7 +1254,7 @@ def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsInde @overload # dtype: None (default) def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... @overload # dtype: known scalar type -def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ... +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[ScalarT]: ... @overload # dtype: like bool def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... @overload # dtype: like int_ @@ -1195,21 +1285,21 @@ def isclose( equal_nan: py_bool = False, ) -> np.bool: ... @overload # known shape, same shape or scalar -def isclose( - a: np.ndarray[_ShapeT], - b: np.ndarray[_ShapeT] | _NumberLike_co, +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + b: np.ndarray[ShapeT] | _NumberLike_co, rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, -) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... @overload # same shape or scalar, known shape -def isclose( - a: np.ndarray[_ShapeT] | _NumberLike_co, - b: np.ndarray[_ShapeT], +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT] | _NumberLike_co, + b: np.ndarray[ShapeT], rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, -) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... 
@overload # 1d sequence, <=1d array-like def isclose( a: Sequence[_NumberLike_co], @@ -1257,20 +1347,20 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... # @overload -def astype( - x: ndarray[_ShapeT], - dtype: _DTypeLike[_ScalarT], +def astype[ShapeT: _Shape, ScalarT: np.generic]( + x: ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... +) -> ndarray[ShapeT, dtype[ScalarT]]: ... @overload -def astype( - x: ndarray[_ShapeT], +def astype[ShapeT: _Shape]( + x: ndarray[ShapeT], dtype: DTypeLike | None, /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT]: ... +) -> ndarray[ShapeT]: ... diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 265ad4f8eb1f..bd3764f11b84 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -77,7 +77,6 @@ """ import numbers -import warnings from numpy._utils import set_module @@ -106,7 +105,6 @@ # as numerictypes.bool, etc. from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 -from ._dtype import _kind_name from ._string_helpers import ( # noqa: F401 LOWER_TABLE, UPPER_TABLE, @@ -125,68 +123,6 @@ 'complex64', 'complex128', 'complex192', 'complex256', 'object'] -@set_module('numpy') -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - .. deprecated:: 2.0 - Use an explicit dtype like int64 or float64 instead. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. 
- - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> from numpy._core.numerictypes import maximum_sctype - >>> maximum_sctype(int) - - >>> maximum_sctype(np.uint8) - - >>> maximum_sctype(complex) - # may vary - - >>> maximum_sctype(str) - - - >>> maximum_sctype('i2') - - >>> maximum_sctype('f4') - # may vary - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`maximum_sctype` is deprecated. Use an explicit dtype like int64 " - "or float64 instead. (deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - g = obj2sctype(t) - if g is None: - return t - t = g - base = _kind_name(dtype(t)) - if base in sctypes: - return sctypes[base][-1] - else: - return t - @set_module('numpy') def issctype(rep): @@ -220,7 +156,7 @@ def issctype(rep): Strings are also a scalar type: - >>> issctype(np.dtype('str')) + >>> issctype(np.dtype(np.str_)) True """ diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6d5e7750b09b..1bd2373dbaa4 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -96,8 +96,8 @@ def verify_matching_signatures(implementation, dispatcher): (implementation_spec.defaults is not None and len(implementation_spec.defaults) != len(dispatcher_spec.defaults))): - raise RuntimeError('implementation and dispatcher for %s have ' - 'different function signatures' % implementation) + raise RuntimeError(f'implementation and dispatcher for {implementation} have ' + 'different function signatures') if implementation_spec.defaults is not None: if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 6ef52566d782..627165e98d3d 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,13 +1,10 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar +from typing import Any, Final, NamedTuple from 
numpy._utils import set_module as set_module -_T = TypeVar("_T") -_Tss = ParamSpec("_Tss") -_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) - -_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] +type _FuncLike = type | Callable[..., object] +type _Dispatcher[**_Tss] = Callable[_Tss, Iterable[object]] ### @@ -21,27 +18,27 @@ class ArgSpec(NamedTuple): defaults: tuple[Any, ...] def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... +def finalize_array_function_like[FuncLikeT: _FuncLike](public_api: FuncLikeT) -> FuncLikeT: ... # -def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... +def verify_matching_signatures[**Tss](implementation: Callable[Tss, object], dispatcher: _Dispatcher[Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks # for any `__array_function__` of the values of specific arguments that the dispatcher # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. -def array_function_dispatch( - dispatcher: _Dispatcher[_Tss] | None = None, +def array_function_dispatch[**Tss, FuncLikeT: _FuncLike]( + dispatcher: _Dispatcher[Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncLikeT], _FuncLikeT]: ... +) -> Callable[[FuncLikeT], FuncLikeT]: ... # -def array_function_from_dispatcher( - implementation: Callable[_Tss, _T], +def array_function_from_dispatcher[**Tss, T]( + implementation: Callable[Tss, T], module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... 
+) -> Callable[[_Dispatcher[Tss]], Callable[Tss, T]]: ... diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 9a6af16e3b23..dbf84efcdd56 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -106,10 +106,10 @@ class format_parser: titles will simply not appear. If `names` is empty, default field names will be used. - >>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + >>> np.rec.format_parser(['f8', 'i4', 'S5'], ['col1', 'col2', 'col3'], ... []).dtype dtype([('col1', '>> np.rec.format_parser(['>> np.rec.format_parser(['{maxlen}}: {getattr(self, name)}" for name in names] return "\n".join(rows) # The recarray is almost identical to a standard array (which supports @@ -383,7 +382,7 @@ class recarray(ndarray): """ - def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C'): @@ -395,12 +394,10 @@ def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, ).dtype if buf is None: - self = ndarray.__new__( - subtype, shape, (record, descr), order=order - ) + self = ndarray.__new__(cls, shape, (record, descr), order=order) else: self = ndarray.__new__( - subtype, shape, (record, descr), buffer=buf, + cls, shape, (record, descr), buffer=buf, offset=offset, strides=strides, order=order ) return self @@ -458,7 +455,11 @@ def __setattr__(self, attr, val): newattr = attr not in self.__dict__ try: - ret = object.__setattr__(self, attr, val) + if attr == 'dtype': + # gh-29244 + ret = self._set_dtype(val) + else: + ret = object.__setattr__(self, attr, val) except Exception: fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} if attr not in fielddict: @@ -742,7 +743,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, return _array else: if shape is not None and retval.shape != shape: - retval.shape = shape + 
retval = retval.reshape(shape) res = retval.view(recarray) @@ -863,7 +864,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, Examples -------- >>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a = np.empty(10,dtype='f8,i4,S5') >>> a[5] = (0.5,10,'abcde') >>> >>> fd=TemporaryFile() @@ -871,7 +872,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, >>> a.tofile(fd) >>> >>> _ = fd.seek(0) - >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10, + >>> r=np.rec.fromfile(fd, formats='f8,i4,S5', shape=10, ... byteorder='<') >>> print(r[5]) (0.5, 10, b'abcde') diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 511a6a764829..7b9c36057c35 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,18 +1,16 @@ -# ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false from _typeshed import Incomplete, StrOrBytesPath -from collections.abc import Iterable, Sequence +from collections.abc import Buffer, Iterable, Sequence from typing import ( Any, ClassVar, Literal, Protocol, SupportsIndex, - TypeAlias, overload, type_check_only, ) -from typing_extensions import Buffer, TypeVar +from typing_extensions import TypeVar import numpy as np from numpy import _ByteOrder, _OrderKACF @@ -39,12 +37,11 @@ __all__ = [ "record", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +# Explicit covariant type variables are needed because mypy isn't very good at variance inference right now. 
_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]] +type _RecArray[_ScalarT: np.generic] = recarray[_AnyShape, np.dtype[_ScalarT]] @type_check_only class _SupportsReadInto(Protocol): @@ -57,26 +54,27 @@ class _SupportsReadInto(Protocol): # exported in `numpy.rec` class record(np.void): # type: ignore[misc] __name__: ClassVar[Literal["record"]] = "record" - __module__: Literal["numpy"] = "numpy" + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] def pprint(self) -> str: ... def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... - @overload - def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... + # + @overload # type: ignore[override] + def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... # pyrefly: ignore[bad-override] @overload def __getitem__(self, key: list[str], /) -> record: ... # exported in `numpy.rec` class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): __name__: ClassVar[Literal["recarray"]] = "recarray" - __module__: Literal["numpy.rec"] = "numpy.rec" + __module__: Literal["numpy.rec"] = "numpy.rec" # pyrefly: ignore[bad-override] @overload def __new__( - subtype, + cls, shape: _ShapeLike, dtype: None = None, buf: Buffer | None = None, @@ -92,7 +90,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): ) -> _RecArray[record]: ... 
@overload def __new__( - subtype, + cls, shape: _ShapeLike, dtype: DTypeLike | None, buf: Buffer | None = None, @@ -235,8 +233,8 @@ def fromfile( # exported in `numpy.rec` @overload -def array( - obj: _ScalarT | NDArray[_ScalarT], +def array[ScalarT: np.generic]( + obj: ScalarT | NDArray[ScalarT], dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, @@ -247,7 +245,7 @@ def array( aligned: bool = False, byteorder: None = None, copy: bool = True, -) -> _RecArray[_ScalarT]: ... +) -> _RecArray[ScalarT]: ... @overload def array( obj: ArrayLike, @@ -337,4 +335,4 @@ def array( ) -> _RecArray[record]: ... # exported in `numpy.rec` -def find_duplicate(list: Iterable[_T]) -> list[_T]: ... +def find_duplicate[T](list: Iterable[T]) -> list[T]: ... diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index b87cae8a5f0f..9e4ac855c557 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import _CastingKind, generic +import numpy as np +from numpy import _CastingKind from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike __all__ = [ @@ -15,21 +16,28 @@ __all__ = [ "vstack", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=generic) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -### +type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] # keep in sync with `numpy.ma.extras.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> 
NDArray[_ScalarT]: ... +def atleast_1d[ArrayT: _Array1D[Any] | _Array2D[Any] | _Array3D[Any]](a0: ArrayT, /) -> ArrayT: ... +@overload +def atleast_1d[ScalarT: np.generic](a0: _Array0D[ScalarT], /) -> _Array1D[ScalarT]: ... +@overload +def atleast_1d[ScalarT: np.generic](a0: ScalarT, /) -> _Array1D[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... +@overload +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -39,11 +47,21 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_2d[ArrayT: _Array2D[Any] | _Array3D[Any]](a0: ArrayT, /) -> ArrayT: ... +@overload +def atleast_2d[ScalarT: np.generic](a0: _Array0D[ScalarT] | _Array1D[ScalarT], /) -> _Array2D[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_2d[ScalarT: np.generic](a0: ScalarT, /) -> _Array2D[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
+@overload +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... +@overload +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -53,11 +71,21 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_3d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_3d[ArrayT: _Array3D[Any]](a0: ArrayT, /) -> ArrayT: ... +@overload +def atleast_3d[ScalarT: np.generic](a0: _Array0D[ScalarT] | _Array1D[ScalarT] | _Array2D[ScalarT], /) -> _Array3D[ScalarT]: ... +@overload +def atleast_3d[ScalarT: np.generic](a0: ScalarT, /) -> _Array3D[ScalarT]: ... +@overload +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -66,23 +94,23 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... 
# used by numpy.lib._shape_base_impl -def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... +def _arrays_for_stack_dispatcher[T](arrays: Sequence[T]) -> tuple[T, ...]: ... # keep in sync with `numpy.ma.extras.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], @@ -93,19 +121,19 @@ def vstack( # keep in sync with `numpy.ma.extras.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], @@ -116,23 +144,23 @@ def hstack( # keep in sync with `numpy.ma.extras.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], axis: SupportsIndex = 0, out: None = None, *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], @@ -143,31 +171,31 @@ def stack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def unstack( - array: _ArrayLike[_ScalarT], +def unstack[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], /, *, axis: int = 0, -) -> tuple[NDArray[_ScalarT], ...]: ... +) -> tuple[NDArray[ScalarT], ...]: ... @overload def unstack( array: ArrayLike, @@ -177,6 +205,6 @@ def unstack( ) -> tuple[NDArray[Any], ...]: ... @overload -def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def block[ScalarT: np.generic](arrays: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def block(arrays: ArrayLike) -> NDArray[Any]: ... 
diff --git a/numpy/_core/src/_simd/_simd_inc.h.src b/numpy/_core/src/_simd/_simd_inc.h.src index a023848831ed..192000f7165a 100644 --- a/numpy/_core/src/_simd/_simd_inc.h.src +++ b/numpy/_core/src/_simd/_simd_inc.h.src @@ -104,21 +104,21 @@ typedef struct { // type name compatible with python style const char *pyname; - // returns '1' if the type represent a unsigned integer + // returns '1' if the type represents an unsigned integer unsigned int is_unsigned:1; - // returns '1' if the type represent a signed integer + // returns '1' if the type represents a signed integer unsigned int is_signed:1; - // returns '1' if the type represent a single or double precision + // returns '1' if the type represents a single or double precision unsigned int is_float:1; - // returns '1' if the type represent a boolean + // returns '1' if the type represents a boolean unsigned int is_bool:1; - // returns '1' if the type represent a sequence + // returns '1' if the type represents a sequence unsigned int is_sequence:1; - // returns '1' if the type represent a scalar + // returns '1' if the type represents a scalar unsigned int is_scalar:1; - // returns '1' if the type represent a vector + // returns '1' if the type represents a vector unsigned int is_vector:1; - // returns the len of multi-vector if the type represent x2 or x3 vector + // returns the len of multi-vector if the type represents x2 or x3 vector // otherwise returns 0, e.g. returns 2 if data type is simd_data_vu8x2 int is_vectorx; // returns the equivalent scalar data type e.g. 
simd_data_vu8 -> simd_data_u8 diff --git a/numpy/distutils/checks/cpu_asimd.c b/numpy/_core/src/_simd/checks/cpu_asimd.c similarity index 100% rename from numpy/distutils/checks/cpu_asimd.c rename to numpy/_core/src/_simd/checks/cpu_asimd.c diff --git a/numpy/distutils/checks/cpu_asimddp.c b/numpy/_core/src/_simd/checks/cpu_asimddp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimddp.c rename to numpy/_core/src/_simd/checks/cpu_asimddp.c diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/_core/src/_simd/checks/cpu_asimdfhm.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdfhm.c rename to numpy/_core/src/_simd/checks/cpu_asimdfhm.c diff --git a/numpy/distutils/checks/cpu_asimdhp.c b/numpy/_core/src/_simd/checks/cpu_asimdhp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdhp.c rename to numpy/_core/src/_simd/checks/cpu_asimdhp.c diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/_core/src/_simd/checks/cpu_avx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx.c rename to numpy/_core/src/_simd/checks/cpu_avx.c diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/_core/src/_simd/checks/cpu_avx2.c similarity index 100% rename from numpy/distutils/checks/cpu_avx2.c rename to numpy/_core/src/_simd/checks/cpu_avx2.c diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/_core/src/_simd/checks/cpu_avx512_clx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_clx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_clx.c diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/_core/src/_simd/checks/cpu_avx512_cnl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_cnl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_cnl.c diff --git a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/_core/src/_simd/checks/cpu_avx512_icl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_icl.c rename to 
numpy/_core/src/_simd/checks/cpu_avx512_icl.c diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/_core/src/_simd/checks/cpu_avx512_knl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knl.c diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/_core/src/_simd/checks/cpu_avx512_knm.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knm.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knm.c diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/_core/src/_simd/checks/cpu_avx512_skx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_skx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_skx.c diff --git a/numpy/distutils/checks/cpu_avx512_spr.c b/numpy/_core/src/_simd/checks/cpu_avx512_spr.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_spr.c rename to numpy/_core/src/_simd/checks/cpu_avx512_spr.c diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/_core/src/_simd/checks/cpu_avx512cd.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512cd.c rename to numpy/_core/src/_simd/checks/cpu_avx512cd.c diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/_core/src/_simd/checks/cpu_avx512f.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512f.c rename to numpy/_core/src/_simd/checks/cpu_avx512f.c diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/_core/src/_simd/checks/cpu_f16c.c similarity index 100% rename from numpy/distutils/checks/cpu_f16c.c rename to numpy/_core/src/_simd/checks/cpu_f16c.c diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/_core/src/_simd/checks/cpu_fma3.c similarity index 100% rename from numpy/distutils/checks/cpu_fma3.c rename to numpy/_core/src/_simd/checks/cpu_fma3.c diff --git a/numpy/distutils/checks/cpu_fma4.c b/numpy/_core/src/_simd/checks/cpu_fma4.c similarity index 100% rename from numpy/distutils/checks/cpu_fma4.c rename to 
numpy/_core/src/_simd/checks/cpu_fma4.c diff --git a/numpy/distutils/checks/cpu_lsx.c b/numpy/_core/src/_simd/checks/cpu_lsx.c similarity index 100% rename from numpy/distutils/checks/cpu_lsx.c rename to numpy/_core/src/_simd/checks/cpu_lsx.c diff --git a/numpy/distutils/checks/cpu_neon.c b/numpy/_core/src/_simd/checks/cpu_neon.c similarity index 100% rename from numpy/distutils/checks/cpu_neon.c rename to numpy/_core/src/_simd/checks/cpu_neon.c diff --git a/numpy/distutils/checks/cpu_neon_fp16.c b/numpy/_core/src/_simd/checks/cpu_neon_fp16.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_fp16.c rename to numpy/_core/src/_simd/checks/cpu_neon_fp16.c diff --git a/numpy/distutils/checks/cpu_neon_vfpv4.c b/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_vfpv4.c rename to numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/_core/src/_simd/checks/cpu_popcnt.c similarity index 100% rename from numpy/distutils/checks/cpu_popcnt.c rename to numpy/_core/src/_simd/checks/cpu_popcnt.c diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/_core/src/_simd/checks/cpu_rvv.c similarity index 100% rename from numpy/distutils/checks/cpu_rvv.c rename to numpy/_core/src/_simd/checks/cpu_rvv.c diff --git a/numpy/distutils/checks/cpu_sse.c b/numpy/_core/src/_simd/checks/cpu_sse.c similarity index 100% rename from numpy/distutils/checks/cpu_sse.c rename to numpy/_core/src/_simd/checks/cpu_sse.c diff --git a/numpy/distutils/checks/cpu_sse2.c b/numpy/_core/src/_simd/checks/cpu_sse2.c similarity index 100% rename from numpy/distutils/checks/cpu_sse2.c rename to numpy/_core/src/_simd/checks/cpu_sse2.c diff --git a/numpy/distutils/checks/cpu_sse3.c b/numpy/_core/src/_simd/checks/cpu_sse3.c similarity index 100% rename from numpy/distutils/checks/cpu_sse3.c rename to numpy/_core/src/_simd/checks/cpu_sse3.c diff --git a/numpy/distutils/checks/cpu_sse41.c 
b/numpy/_core/src/_simd/checks/cpu_sse41.c similarity index 100% rename from numpy/distutils/checks/cpu_sse41.c rename to numpy/_core/src/_simd/checks/cpu_sse41.c diff --git a/numpy/distutils/checks/cpu_sse42.c b/numpy/_core/src/_simd/checks/cpu_sse42.c similarity index 100% rename from numpy/distutils/checks/cpu_sse42.c rename to numpy/_core/src/_simd/checks/cpu_sse42.c diff --git a/numpy/distutils/checks/cpu_ssse3.c b/numpy/_core/src/_simd/checks/cpu_ssse3.c similarity index 100% rename from numpy/distutils/checks/cpu_ssse3.c rename to numpy/_core/src/_simd/checks/cpu_ssse3.c diff --git a/numpy/distutils/checks/cpu_sve.c b/numpy/_core/src/_simd/checks/cpu_sve.c similarity index 100% rename from numpy/distutils/checks/cpu_sve.c rename to numpy/_core/src/_simd/checks/cpu_sve.c diff --git a/numpy/distutils/checks/cpu_vsx.c b/numpy/_core/src/_simd/checks/cpu_vsx.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx.c rename to numpy/_core/src/_simd/checks/cpu_vsx.c diff --git a/numpy/distutils/checks/cpu_vsx2.c b/numpy/_core/src/_simd/checks/cpu_vsx2.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx2.c rename to numpy/_core/src/_simd/checks/cpu_vsx2.c diff --git a/numpy/distutils/checks/cpu_vsx3.c b/numpy/_core/src/_simd/checks/cpu_vsx3.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx3.c rename to numpy/_core/src/_simd/checks/cpu_vsx3.c diff --git a/numpy/distutils/checks/cpu_vsx4.c b/numpy/_core/src/_simd/checks/cpu_vsx4.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx4.c rename to numpy/_core/src/_simd/checks/cpu_vsx4.c diff --git a/numpy/distutils/checks/cpu_vx.c b/numpy/_core/src/_simd/checks/cpu_vx.c similarity index 100% rename from numpy/distutils/checks/cpu_vx.c rename to numpy/_core/src/_simd/checks/cpu_vx.c diff --git a/numpy/distutils/checks/cpu_vxe.c b/numpy/_core/src/_simd/checks/cpu_vxe.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe.c rename to 
numpy/_core/src/_simd/checks/cpu_vxe.c diff --git a/numpy/distutils/checks/cpu_vxe2.c b/numpy/_core/src/_simd/checks/cpu_vxe2.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe2.c rename to numpy/_core/src/_simd/checks/cpu_vxe2.c diff --git a/numpy/distutils/checks/cpu_xop.c b/numpy/_core/src/_simd/checks/cpu_xop.c similarity index 100% rename from numpy/distutils/checks/cpu_xop.c rename to numpy/_core/src/_simd/checks/cpu_xop.c diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/_core/src/_simd/checks/extra_avx512bw_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512bw_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512bw_mask.c diff --git a/numpy/distutils/checks/extra_avx512dq_mask.c b/numpy/_core/src/_simd/checks/extra_avx512dq_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512dq_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512dq_mask.c diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/_core/src/_simd/checks/extra_avx512f_reduce.c similarity index 100% rename from numpy/distutils/checks/extra_avx512f_reduce.c rename to numpy/_core/src/_simd/checks/extra_avx512f_reduce.c diff --git a/numpy/distutils/checks/extra_vsx3_half_double.c b/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c similarity index 100% rename from numpy/distutils/checks/extra_vsx3_half_double.c rename to numpy/_core/src/_simd/checks/extra_vsx3_half_double.c diff --git a/numpy/distutils/checks/extra_vsx4_mma.c b/numpy/_core/src/_simd/checks/extra_vsx4_mma.c similarity index 100% rename from numpy/distutils/checks/extra_vsx4_mma.c rename to numpy/_core/src/_simd/checks/extra_vsx4_mma.c diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/_core/src/_simd/checks/extra_vsx_asm.c similarity index 100% rename from numpy/distutils/checks/extra_vsx_asm.c rename to numpy/_core/src/_simd/checks/extra_vsx_asm.c diff --git a/numpy/distutils/checks/test_flags.c 
b/numpy/_core/src/_simd/checks/test_flags.c similarity index 100% rename from numpy/distutils/checks/test_flags.c rename to numpy/_core/src/_simd/checks/test_flags.c diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index 66a215dfeb64..04ca81086bcf 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -225,10 +225,11 @@ _bad_strides(PyArrayObject *ap) * __array_ufunc__ nonsense is also assumed to have been taken care of. */ NPY_NO_EXPORT PyObject * -cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, +cblas_matrixproduct(PyArray_Descr *typec, PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject *out) { PyArrayObject *result = NULL, *out_buf = NULL; + int typenum = typec->type_num; npy_intp j, lda, ldb; npy_intp l; int nd; @@ -364,7 +365,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, } } - out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum, &result); + out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typec, &result); if (out_buf == NULL) { goto fail; } diff --git a/numpy/_core/src/common/cblasfuncs.h b/numpy/_core/src/common/cblasfuncs.h index 71c533f369a4..fb9c325dd4d9 100644 --- a/numpy/_core/src/common/cblasfuncs.h +++ b/numpy/_core/src/common/cblasfuncs.h @@ -2,6 +2,6 @@ #define NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ NPY_NO_EXPORT PyObject * -cblas_matrixproduct(int, PyArrayObject *, PyArrayObject *, PyArrayObject *); +cblas_matrixproduct(PyArray_Descr *, PyArrayObject *, PyArrayObject *, PyArrayObject *); #endif /* NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ */ diff --git a/numpy/_core/src/common/gil_utils.c b/numpy/_core/src/common/gil_utils.c index 95af26a2bf8e..c87cbe2d64ae 100644 --- a/numpy/_core/src/common/gil_utils.c +++ b/numpy/_core/src/common/gil_utils.c @@ -16,9 +16,6 @@ npy_gil_error(PyObject *type, const char *format, ...) 
NPY_ALLOW_C_API_DEF; NPY_ALLOW_C_API; if (!PyErr_Occurred()) { -#if !defined(PYPY_VERSION) - PyErr_FormatV(type, format, va); -#else PyObject *exc_str = PyUnicode_FromFormatV(format, va); if (exc_str == NULL) { // no reason to have special handling for this error case, since @@ -29,7 +26,6 @@ npy_gil_error(PyObject *type, const char *format, ...) } PyErr_SetObject(type, exc_str); Py_DECREF(exc_str); -#endif } NPY_DISABLE_C_API; va_end(va); diff --git a/numpy/_core/src/common/lowlevel_strided_loops.h b/numpy/_core/src/common/lowlevel_strided_loops.h index 9bcfcf2d3f37..fedd38a7b212 100644 --- a/numpy/_core/src/common/lowlevel_strided_loops.h +++ b/numpy/_core/src/common/lowlevel_strided_loops.h @@ -10,10 +10,14 @@ /* For PyArray_ macros used below */ #include "numpy/ndarrayobject.h" +#ifdef __cplusplus +extern "C" { +#endif + /* * NOTE: This API should remain private for the time being, to allow * for further refinement. I think the 'aligned' mechanism - * needs changing, for example. + * needs changing, for example. * * Note: Updated in 2018 to distinguish "true" from "uint" alignment. 
*/ @@ -787,4 +791,9 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr stride2 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ } + +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_LOWLEVEL_STRIDED_LOOPS_H_ */ diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index aa011be9c585..8961bdd61e49 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -1,13 +1,14 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/ndarraytypes.h" #include "numpy/npy_2_compat.h" #include "npy_argparse.h" -#include "npy_atomic.h" #include "npy_import.h" #include "arrayfunction_override.h" @@ -75,8 +76,6 @@ PyArray_PythonPyIntFromInt(PyObject *obj, int *value) } -typedef int convert(PyObject *, void *); - /** * Internal function to initialize keyword argument parsing. * @@ -91,55 +90,41 @@ typedef int convert(PyObject *, void *); * * @param funcname Name of the function, mainly used for errors. 
* @param cache A cache object stored statically in the parsing function - * @param va_orig Argument list to npy_parse_arguments + * @param specs Array of argument specifications + * @param nspecs Number of argument specifications * @return 0 on success, -1 on failure */ static int initialize_keywords(const char *funcname, - _NpyArgParserCache *cache, va_list va_orig) { - va_list va; - int nargs = 0; + _NpyArgParserCache *cache, npy_arg_spec *specs, int nspecs) { int nkwargs = 0; int npositional_only = 0; int nrequired = 0; int npositional = 0; char state = '\0'; - va_copy(va, va_orig); - while (1) { - /* Count length first: */ - char *name = va_arg(va, char *); - convert *converter = va_arg(va, convert *); - void *data = va_arg(va, void *); - - /* Check if this is the sentinel, only converter may be NULL */ - if ((name == NULL) && (converter == NULL) && (data == NULL)) { - break; - } + for (int i = 0; i < nspecs; i++) { + const char *name = specs[i].name; if (name == NULL) { PyErr_Format(PyExc_SystemError, "NumPy internal error: name is NULL in %s() at " - "argument %d.", funcname, nargs); - va_end(va); + "argument %d.", funcname, i); return -1; } - if (data == NULL) { + if (specs[i].output == NULL) { PyErr_Format(PyExc_SystemError, "NumPy internal error: data is NULL in %s() at " - "argument %d.", funcname, nargs); - va_end(va); + "argument %d.", funcname, i); return -1; } - nargs += 1; if (*name == '|') { if (state == '$') { PyErr_Format(PyExc_SystemError, "NumPy internal error: positional argument `|` " "after keyword only `$` one to %s() at argument %d.", - funcname, nargs); - va_end(va); + funcname, i + 1); return -1; } state = '|'; @@ -155,8 +140,7 @@ initialize_keywords(const char *funcname, PyErr_Format(PyExc_SystemError, "NumPy internal error: non-required argument after " "required | or $ one to %s() at argument %d.", - funcname, nargs); - va_end(va); + funcname, i + 1); return -1; } @@ -171,8 +155,7 @@ initialize_keywords(const char *funcname, 
PyErr_Format(PyExc_SystemError, "NumPy internal error: non-kwarg marked with $ " "to %s() at argument %d or positional only following " - "kwarg.", funcname, nargs); - va_end(va); + "kwarg.", funcname, i + 1); return -1; } } @@ -180,18 +163,17 @@ initialize_keywords(const char *funcname, nkwargs += 1; } } - va_end(va); if (npositional == -1) { - npositional = nargs; + npositional = nspecs; } - if (nargs > _NPY_MAX_KWARGS) { + if (nspecs > _NPY_MAX_KWARGS) { PyErr_Format(PyExc_SystemError, "NumPy internal error: function %s() has %d arguments, but " "the maximum is currently limited to %d for easier parsing; " "it can be increased by modifying `_NPY_MAX_KWARGS`.", - funcname, nargs, _NPY_MAX_KWARGS); + funcname, nspecs, _NPY_MAX_KWARGS); return -1; } @@ -199,7 +181,7 @@ initialize_keywords(const char *funcname, * Do any necessary string allocation and interning, * creating a caching object. */ - cache->nargs = nargs; + cache->nargs = nspecs; cache->npositional_only = npositional_only; cache->npositional = npositional; cache->nrequired = nrequired; @@ -207,12 +189,8 @@ initialize_keywords(const char *funcname, /* NULL kw_strings for easier cleanup (and NULL termination) */ memset(cache->kw_strings, 0, sizeof(PyObject *) * (nkwargs + 1)); - va_copy(va, va_orig); - for (int i = 0; i < nargs; i++) { - /* Advance through non-kwargs, which do not require setup. 
*/ - char *name = va_arg(va, char *); - va_arg(va, convert *); - va_arg(va, void *); + for (int i = 0; i < nspecs; i++) { + const char *name = specs[i].name; if (*name == '|' || *name == '$') { name++; /* ignore | and $ */ @@ -221,13 +199,11 @@ initialize_keywords(const char *funcname, int i_kwarg = i - npositional_only; cache->kw_strings[i_kwarg] = PyUnicode_InternFromString(name); if (cache->kw_strings[i_kwarg] == NULL) { - va_end(va); goto error; } } } - va_end(va); return 0; error: @@ -287,30 +263,26 @@ raise_missing_argument(const char *funcname, * @param args Python passed args (METH_FASTCALL) * @param len_args Number of arguments (not flagged) * @param kwnames Tuple as passed by METH_FASTCALL or NULL. - * @param ... List of arguments (see macro version). + * @param specs Array of argument specifications + * @param nspecs Number of argument specifications * * @return Returns 0 on success and -1 on failure. */ NPY_NO_EXPORT int _npy_parse_arguments(const char *funcname, - /* cache_ptr is a NULL initialized persistent storage for data */ _NpyArgParserCache *cache, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, - /* ... is NULL, NULL, NULL terminated: name, converter, value */ - ...) 
+ npy_arg_spec *specs, int nspecs) { - if (!npy_atomic_load_uint8(&cache->initialized)) { + if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { LOCK_ARGPARSE_MUTEX; - if (!npy_atomic_load_uint8(&cache->initialized)) { - va_list va; - va_start(va, kwnames); - int res = initialize_keywords(funcname, cache, va); - va_end(va); + if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { + int res = initialize_keywords(funcname, cache, specs, nspecs); if (res < 0) { UNLOCK_ARGPARSE_MUTEX; return -1; } - npy_atomic_store_uint8(&cache->initialized, 1); + atomic_store_explicit((_Atomic(uint8_t) *)&cache->initialized, 1, memory_order_release); } UNLOCK_ARGPARSE_MUTEX; } @@ -393,38 +365,33 @@ _npy_parse_arguments(const char *funcname, assert(len_args + len_kwargs <= cache->nargs); /* At this time `all_arguments` holds either NULLs or the objects */ - va_list va; - va_start(va, kwnames); - for (int i = 0; i < max_nargs; i++) { - va_arg(va, char *); - convert *converter = va_arg(va, convert *); - void *data = va_arg(va, void *); - if (all_arguments[i] == NULL) { continue; } - int res; + npy_arg_converter converter = (npy_arg_converter)specs[i].converter; + void *data = specs[i].output; + if (converter == NULL) { *((PyObject **) data) = all_arguments[i]; continue; } - res = converter(all_arguments[i], data); + int res = converter(all_arguments[i], data); if (NPY_UNLIKELY(res == NPY_SUCCEED)) { continue; } else if (NPY_UNLIKELY(res == NPY_FAIL)) { /* It is usually the users responsibility to clean up. 
*/ - goto converting_failed; + return -1; } else if (NPY_UNLIKELY(res == Py_CLEANUP_SUPPORTED)) { /* TODO: Implementing cleanup if/when needed should not be hard */ PyErr_Format(PyExc_SystemError, "converter cleanup of parameter %d to %s() not supported.", i, funcname); - goto converting_failed; + return -1; } assert(0); } @@ -434,21 +401,15 @@ _npy_parse_arguments(const char *funcname, /* (PyArg_* also does this after the actual parsing is finished) */ if (NPY_UNLIKELY(max_nargs < cache->nrequired)) { raise_missing_argument(funcname, cache, max_nargs); - goto converting_failed; + return -1; } for (int i = 0; i < cache->nrequired; i++) { if (NPY_UNLIKELY(all_arguments[i] == NULL)) { raise_missing_argument(funcname, cache, i); - goto converting_failed; + return -1; } } } - va_end(va); return 0; - -converting_failed: - va_end(va); - return -1; - } diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index e1eef918cb33..f48ba90791fe 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -20,7 +20,15 @@ NPY_NO_EXPORT int PyArray_PythonPyIntFromInt(PyObject *obj, int *value); -#define _NPY_MAX_KWARGS 15 +#define _NPY_MAX_KWARGS 14 + +typedef int (*npy_arg_converter)(PyObject *, void *); + +typedef struct { + const char *name; + void *converter; + void *output; +} npy_arg_spec; typedef struct { int npositional; @@ -54,11 +62,10 @@ NPY_NO_EXPORT int init_argparse_mutex(void); * * PyObject *argument1, *argument3; * int argument2 = -1; - * if (npy_parse_arguments("method", args, len_args, kwnames), - * "argument1", NULL, &argument1, - * "|argument2", &PyArray_PythonPyIntFromInt, &argument2, - * "$argument3", NULL, &argument3, - * NULL, NULL, NULL) < 0) { + * if (npy_parse_arguments("method", args, len_args, kwnames, + * {"argument1", NULL, &argument1}, + * {"|argument2", &PyArray_PythonPyIntFromInt, &argument2}, + * {"$argument3", NULL, &argument3}) < 0) { * return NULL; * } * } @@ -66,32 
+73,43 @@ NPY_NO_EXPORT int init_argparse_mutex(void); * * The `NPY_PREPARE_ARGPARSER` macro sets up a static cache variable necessary * to hold data for speeding up the parsing. `npy_parse_arguments` must be - * used in cunjunction with the macro defined in the same scope. + * used in conjunction with the macro defined in the same scope. * (No two `npy_parse_arguments` may share a single `NPY_PREPARE_ARGPARSER`.) * * @param funcname Function name * @param args Python passed args (METH_FASTCALL) * @param len_args Number of arguments (not flagged) * @param kwnames Tuple as passed by METH_FASTCALL or NULL. - * @param ... List of arguments must be param1_name, param1_converter, - * *param1_outvalue, param2_name, ..., NULL, NULL, NULL. - * Where name is ``char *``, ``converter`` a python converter - * function or NULL and ``outvalue`` is the ``void *`` passed to - * the converter (holding the converted data or a borrowed - * reference if converter is NULL). + * @param ... List of argument specs as {name, converter, outvalue} structs. + * Where name is ``const char *``, ``converter`` a python converter + * function pointer or NULL and ``outvalue`` is the ``void *`` + * passed to the converter (holding the converted data or a + * borrowed reference if converter is NULL). * * @return Returns 0 on success and -1 on failure. */ NPY_NO_EXPORT int _npy_parse_arguments(const char *funcname, - /* cache_ptr is a NULL initialized persistent storage for data */ - _NpyArgParserCache *cache_ptr, + _NpyArgParserCache *cache, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, - /* va_list is NULL, NULL, NULL terminated: name, converter, value */ - ...) NPY_GCC_NONNULL(1); + npy_arg_spec *specs, int nspecs) NPY_GCC_NONNULL(1); -#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) 
\ - _npy_parse_arguments(funcname, &__argparse_cache, \ - args, len_args, kwnames, __VA_ARGS__) +#ifdef __cplusplus +#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) \ + [&]() -> int { \ + npy_arg_spec _npy_specs_[] = {__VA_ARGS__}; \ + return _npy_parse_arguments(funcname, &__argparse_cache, \ + args, len_args, kwnames, \ + _npy_specs_, \ + (int)(sizeof(_npy_specs_) / sizeof(npy_arg_spec))); \ + }() +#else +#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) \ + _npy_parse_arguments(funcname, &__argparse_cache, \ + args, len_args, kwnames, \ + (npy_arg_spec[]){__VA_ARGS__}, \ + (int)(sizeof((npy_arg_spec[]){__VA_ARGS__}) \ + / sizeof(npy_arg_spec))) +#endif #endif /* NUMPY_CORE_SRC_COMMON_NPY_ARGPARSE_H */ diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h deleted file mode 100644 index 61a31acc13e0..000000000000 --- a/numpy/_core/src/common/npy_atomic.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Provides wrappers around C11 standard library atomics and MSVC intrinsics - * to provide basic atomic load and store functionality. 
This is based on - * code in CPython's pyatomic.h, pyatomic_std.h, and pyatomic_msc.h - */ - -#ifndef NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ -#define NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ - -#include "numpy/npy_common.h" - -#ifdef __cplusplus - extern "C++" { - #include - } - #define _NPY_USING_STD using namespace std - #define _Atomic(tp) atomic - #define STDC_ATOMICS -#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ - && !defined(__STDC_NO_ATOMICS__) - #include - #include - #define _NPY_USING_STD - #define STDC_ATOMICS -#elif _MSC_VER - #include - #define MSC_ATOMICS - #if !defined(_M_X64) && !defined(_M_IX86) && !defined(_M_ARM64) - #error "Unsupported MSVC build configuration, neither x86 or ARM" - #endif -#elif defined(__GNUC__) && (__GNUC__ > 4) - #define GCC_ATOMICS -#elif defined(__clang__) - #if __has_builtin(__atomic_load) - #define GCC_ATOMICS - #endif -#else - #error "no supported atomic implementation for this platform/compiler" -#endif - - -static inline npy_uint8 -npy_atomic_load_uint8(const npy_uint8 *obj) { -#ifdef STDC_ATOMICS - _NPY_USING_STD; - return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); -#elif defined(MSC_ATOMICS) -#if defined(_M_X64) || defined(_M_IX86) - return *(volatile npy_uint8 *)obj; -#else // defined(_M_ARM64) - return (npy_uint8)__ldar8((unsigned __int8 volatile *)obj); -#endif -#elif defined(GCC_ATOMICS) - return __atomic_load_n(obj, __ATOMIC_SEQ_CST); -#endif -} - -static inline void* -npy_atomic_load_ptr(const void *obj) { -#ifdef STDC_ATOMICS - _NPY_USING_STD; - return atomic_load((const _Atomic(void *)*)obj); -#elif defined(MSC_ATOMICS) -#if SIZEOF_VOID_P == 8 -#if defined(_M_X64) || defined(_M_IX86) - return (void *)*(volatile uint64_t *)obj; -#elif defined(_M_ARM64) - return (void *)__ldar64((unsigned __int64 volatile *)obj); -#endif -#else -#if defined(_M_X64) || defined(_M_IX86) - return (void *)*(volatile uint32_t *)obj; -#elif defined(_M_ARM64) - return (void *)__ldar32((unsigned __int32 volatile 
*)obj); -#endif -#endif -#elif defined(GCC_ATOMICS) - return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); -#endif -} - -static inline npy_hash_t -npy_atomic_load_hash_t(const npy_hash_t *obj) { - assert(sizeof(npy_hash_t) == sizeof(void *)); - return (npy_hash_t)npy_atomic_load_ptr((const void *)obj); -} - -static inline void -npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { -#ifdef STDC_ATOMICS - _NPY_USING_STD; - atomic_store((_Atomic(uint8_t)*)obj, value); -#elif defined(MSC_ATOMICS) - _InterlockedExchange8((volatile char *)obj, (char)value); -#elif defined(GCC_ATOMICS) - __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); -#endif -} - -static inline void -npy_atomic_store_ptr(void *obj, void *value) -{ -#ifdef STDC_ATOMICS - _NPY_USING_STD; - atomic_store((_Atomic(void *)*)obj, value); -#elif defined(MSC_ATOMICS) - _InterlockedExchangePointer((void * volatile *)obj, (void *)value); -#elif defined(GCC_ATOMICS) - __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); -#endif -} - -static inline void -npy_atomic_store_hash_t(npy_hash_t *obj, npy_hash_t value) { - assert(sizeof(npy_hash_t) == sizeof(void *)); - npy_atomic_store_ptr((void *)obj, (void *)value); -} - -#undef MSC_ATOMICS -#undef STDC_ATOMICS -#undef GCC_ATOMICS - -#endif // NUMPY_CORE_SRC_COMMON_NPY_NPY_ATOMIC_H_ diff --git a/numpy/_core/src/common/npy_cpu_dispatch.h b/numpy/_core/src/common/npy_cpu_dispatch.h index 49d29b8aa655..1bebc3b01be3 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.h +++ b/numpy/_core/src/common/npy_cpu_dispatch.h @@ -8,7 +8,7 @@ */ #include "npy_cpu_features.h" // NPY_CPU_HAVE /** - * This header genereated by the build system and contains: + * This header was generated by the build system and contains: * * - Headers for platform-specific instruction sets. 
* - Helper macros that encapsulate enabled features through user-defined build options @@ -79,7 +79,7 @@ npy_cpu_dispatch_trace(const char *func_name, const char *signature, * Extract the enabled CPU targets from the generated configuration file. * * This macro is used to extract the enabled CPU targets from the generated configuration file, - * which is derived from 'meson.multi_targets()' or from 'disutils.CCompilerOpt' in the case of using distutils. + * which is derived from 'meson.multi_targets()'. * It then calls 'npy_cpu_dispatch_trace()' to insert a new item into the '__cpu_targets_info__' dictionary, * based on the provided FUNC_NAME and SIGNATURE. * diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 91dafa96de0a..ba35c962bdd7 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -235,14 +235,13 @@ npy__cpu_validate_baseline(void) #define NPY__CPU_VALIDATE_CB(FEATURE, DUMMY) \ if (!npy__cpu_have[NPY_CAT(NPY_CPU_FEATURE_, FEATURE)]) { \ - const int size = sizeof(NPY_TOSTRING(FEATURE)); \ + const int size = sizeof(NPY_TOSTRING(FEATURE)) - 1; \ memcpy(fptr, NPY_TOSTRING(FEATURE), size); \ fptr[size] = ' '; fptr += size + 1; \ } NPY_WITH_CPU_BASELINE_CALL(NPY__CPU_VALIDATE_CB, DUMMY) // extra arg for msvc - *fptr = '\0'; - if (baseline_failure[0] != '\0') { + if (fptr > baseline_failure) { *(fptr-1) = '\0'; // trim the last space PyErr_Format(PyExc_RuntimeError, "NumPy was built with baseline optimizations: \n" @@ -448,7 +447,7 @@ npy__cpu_cpuid_count(int reg[4], int func_id, int count) static void npy__cpu_cpuid(int reg[4], int func_id) { - return npy__cpu_cpuid_count(reg, func_id, 0); + npy__cpu_cpuid_count(reg, func_id, 0); } static void @@ -503,7 +502,7 @@ npy__cpu_init_features(void) // long mode only npy__cpu_have[NPY_CPU_FEATURE_LAHF] = (reg[2] & (1 << 0)) != 0; #else - // alawys available + // always available npy__cpu_have[NPY_CPU_FEATURE_LAHF] = 
1; #endif npy__cpu_have[NPY_CPU_FEATURE_LZCNT] = (reg[2] & (1 << 5)) != 0; diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c new file mode 100644 index 000000000000..5086fd26af69 --- /dev/null +++ b/numpy/_core/src/common/npy_hashtable.c @@ -0,0 +1,304 @@ +/* Lock-free hash table implementation for identity based keys + * (C arrays of pointers) used for ufunc dispatching cache. + * + * This cache does not do any reference counting of the stored objects, + * and the stored pointers must remain valid while in the cache. + * The cache entries cannot be changed or deleted once added, only new + * entries can be added. It is thread safe and lock-free for reading, and + * uses a mutex for writing (adding new entries). See below for the details + * of thread safety. + * + * The actual hash table is stored in the `buckets` struct which contains + * a flexible array member for the keys and values. It avoids multiple + * atomic operations as resizing the hash table only requires a single atomic + * store to swap in the new buckets pointer. + * + * Thread safety notes for free-threading builds: + * - Reading from the cache (getting items) is lock-free and thread safe. + * The reader reads the current `buckets` pointer using an atomic load + * with memory_order_acquire order. This ensures that the reader + * synchronizes with any concurrent writers that may be resizing the cache. + * The value of item is then read using an atomic load with memory_order_acquire + * order so that it sees the key written by the writer before the value. + * + * - Writing to the cache (adding new items) uses ``tb->mutex`` mutex to + * ensure only one thread writes at a time. The new items are added + * concurrently with readers and synchronized using atomic operations. 
+ * The key is stored first (using memcpy), and then the value is stored
+ * using an atomic store with memory_order_release order so that
+ * the store of key is visible to readers that see the value.
+ *
+ * - Resizing the cache uses the same mutex to ensure only one thread
+ * resizes at a time. The new larger cache is built while holding the
+ * mutex, and then swapped in using an atomic operation. Because
+ * readers can be reading from the old cache while the new one is
+ * swapped in, the old cache is not freed immediately. Instead, it is
+ * kept in a linked list of old caches using the `prev` pointer in the
+ * `buckets` struct. The old caches are only freed when the identity
+ * hash table is deallocated, ensuring that no readers are using them
+ * anymore.
+ */
+
+#include "npy_hashtable.h"
+
+#include "templ_common.h"
+#include <stdatomic.h>
+
+// It is defined here instead of header to avoid flexible array member warning in C++.
+struct buckets {
+ struct buckets *prev; /* linked list of old buckets */
+ npy_intp size; /* current size */
+ npy_intp nelem; /* number of elements */
+ PyObject *array[]; /* array of keys and values */
+};
+
+#if SIZEOF_PY_UHASH_T > 4
+#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL)
+#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)14029467366897019727ULL)
+#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)2870177450012600261ULL)
+#define _NpyHASH_XXROTATE(x) ((x << 31) | (x >> 33)) /* Rotate left 31 bits */
+#else
+#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)2654435761UL)
+#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)2246822519UL)
+#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)374761393UL)
+#define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */
+#endif
+
+#ifdef Py_GIL_DISABLED
+#define FT_ATOMIC_LOAD_PTR_ACQUIRE(ptr) \
+ atomic_load_explicit((_Atomic(void *) *)&(ptr), memory_order_acquire)
+#define FT_ATOMIC_STORE_PTR_RELEASE(ptr, val) \
+ atomic_store_explicit((_Atomic(void *) *)&(ptr), (void *)(val), memory_order_release)
+#else +#define FT_ATOMIC_LOAD_PTR_ACQUIRE(ptr) (ptr) +#define FT_ATOMIC_STORE_PTR_RELEASE(ptr, val) (ptr) = (val) +#endif + +/* + * This hashing function is basically the Python tuple hash with the type + * identity hash inlined. The tuple hash itself is a reduced version of xxHash. + * + * Users cannot control pointers, so we do not have to worry about DoS attacks? + */ +static inline Py_hash_t +identity_list_hash(PyObject *const *v, int len) +{ + Py_uhash_t acc = _NpyHASH_XXPRIME_5; + for (int i = 0; i < len; i++) { + /* + * Lane is the single item hash, which for us is the rotated pointer. + * Identical to the python type hash (pointers end with 0s normally). + */ + size_t y = (size_t)v[i]; + Py_uhash_t lane = (y >> 4) | (y << (8 * SIZEOF_VOID_P - 4)); + acc += lane * _NpyHASH_XXPRIME_2; + acc = _NpyHASH_XXROTATE(acc); + acc *= _NpyHASH_XXPRIME_1; + } + return acc; +} +#undef _NpyHASH_XXPRIME_1 +#undef _NpyHASH_XXPRIME_2 +#undef _NpyHASH_XXPRIME_5 +#undef _NpyHASH_XXROTATE + + +static inline PyObject ** +find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key, + PyObject **pvalue) +{ + Py_hash_t hash = identity_list_hash(key, key_len); + npy_uintp perturb = (npy_uintp)hash; + npy_intp mask = buckets->size - 1; + npy_intp bucket = (npy_intp)hash & mask; + + while (1) { + PyObject **item = &(buckets->array[bucket * (key_len + 1)]); + PyObject *val = FT_ATOMIC_LOAD_PTR_ACQUIRE(item[0]); + if (pvalue != NULL) { + *pvalue = val; + } + if (val == NULL) { + /* The item is not in the cache; return the empty bucket */ + return item; + } + if (memcmp(item+1, key, key_len * sizeof(PyObject *)) == 0) { + /* This is a match, so return the item/bucket */ + return item; + } + /* Hash collision, perturb like Python (must happen rarely!) 
*/ + perturb >>= 5; /* Python uses the macro PERTURB_SHIFT == 5 */ + bucket = mask & (bucket * 5 + perturb + 1); + } +} + + +static inline PyObject ** +find_item(PyArrayIdentityHash const *tb, PyObject *const *key, PyObject **pvalue) +{ + struct buckets *buckets = FT_ATOMIC_LOAD_PTR_ACQUIRE(tb->buckets); + return find_item_buckets(buckets, tb->key_len, key, pvalue); +} + + +NPY_NO_EXPORT PyArrayIdentityHash * +PyArrayIdentityHash_New(int key_len) +{ + PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); + if (res == NULL) { + PyErr_NoMemory(); + return NULL; + } + + assert(key_len > 0); + res->key_len = key_len; + + npy_intp initial_size = 4; /* Start with a size of 4 */ + + res->buckets = PyMem_Calloc(1, sizeof(struct buckets) + + initial_size * (key_len + 1) * sizeof(PyObject *)); + if (res->buckets == NULL) { + PyErr_NoMemory(); + PyMem_Free(res); + return NULL; + } + res->buckets->prev = NULL; + res->buckets->size = initial_size; + res->buckets->nelem = 0; + +#ifdef Py_GIL_DISABLED + res->mutex = (PyMutex){0}; +#endif + return res; +} + + +NPY_NO_EXPORT void +PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) +{ + struct buckets *b = tb->buckets; +#ifdef Py_GIL_DISABLED + // free all old buckets + while (b != NULL) { + struct buckets *prev = b->prev; + PyMem_Free(b); + b = prev; + } +#else + assert(b->prev == NULL); + PyMem_Free(b); +#endif + PyMem_Free(tb); +} + + +static int +_resize_if_necessary(PyArrayIdentityHash *tb) +{ +#ifdef Py_GIL_DISABLED + assert(PyMutex_IsLocked(&tb->mutex)); +#endif + struct buckets *old_buckets = tb->buckets; + int key_len = tb->key_len; + npy_intp prev_size = old_buckets->size; + assert(prev_size > 0); + + if ((old_buckets->nelem + 1) * 2 <= old_buckets->size) { + /* No resize necessary if load factor is not more than 0.5 */ + return 0; + } + + /* Double in size */ + npy_intp new_size = old_buckets->size * 2; + + npy_intp alloc_size; + if (npy_mul_sizes_with_overflow(&alloc_size, 
new_size, key_len + 1)) { + return -1; + } + struct buckets *new_buckets = (struct buckets *)PyMem_Calloc( + 1, sizeof(struct buckets) + alloc_size * sizeof(PyObject *)); + if (new_buckets == NULL) { + PyErr_NoMemory(); + return -1; + } + new_buckets->size = new_size; + new_buckets->nelem = 0; + for (npy_intp i = 0; i < prev_size; i++) { + PyObject **item = &old_buckets->array[i * (key_len + 1)]; + if (item[0] != NULL) { + PyObject **tb_item = find_item_buckets(new_buckets, key_len, item + 1, NULL); + memcpy(tb_item+1, item+1, key_len * sizeof(PyObject *)); + new_buckets->nelem++; + tb_item[0] = item[0]; + } + } +#ifdef Py_GIL_DISABLED + new_buckets->prev = old_buckets; +#else + PyMem_Free(old_buckets); +#endif + FT_ATOMIC_STORE_PTR_RELEASE(tb->buckets, new_buckets); + return 0; +} + + +/** + * Set an item in the identity hash table if it does not already exist. + * If it does exist, return the existing item. + * + * @param tb The mapping. + * @param key The key, must be a C-array of pointers of the length + * corresponding to the mapping. + * @param value Normally a Python object, no reference counting is done + * and it should not be NULL. + * @param result The resulting value, either the existing one or the + * newly added value. + * @returns 0 on success, -1 with a MemoryError set on failure. 
+ */ +static inline int +PyArrayIdentityHash_SetItemDefaultLockHeld(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result) +{ +#ifdef Py_GIL_DISABLED + assert(PyMutex_IsLocked(&tb->mutex)); +#endif + assert(default_value != NULL); + if (_resize_if_necessary(tb) < 0) { + return -1; + } + + PyObject **tb_item = find_item(tb, key, NULL); + if (tb_item[0] == NULL) { + memcpy(tb_item+1, key, tb->key_len * sizeof(PyObject *)); + tb->buckets->nelem++; + FT_ATOMIC_STORE_PTR_RELEASE(tb_item[0], default_value); + *result = default_value; + } else { + *result = tb_item[0]; + } + + return 0; +} + +NPY_NO_EXPORT int +PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result) +{ +#ifdef Py_GIL_DISABLED + PyMutex_Lock(&tb->mutex); +#endif + int ret = PyArrayIdentityHash_SetItemDefaultLockHeld(tb, key, default_value, result); +#ifdef Py_GIL_DISABLED + PyMutex_Unlock(&tb->mutex); +#endif + return ret; +} + + +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) +{ + PyObject *value = NULL; + find_item(tb, key, &value); + return value; +} diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp deleted file mode 100644 index 27e014ca00e0..000000000000 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ /dev/null @@ -1,259 +0,0 @@ -/* - * This functionality is designed specifically for the ufunc machinery to - * dispatch based on multiple DTypes. Since this is designed to be used - * as purely a cache, it currently does no reference counting. - * Even though this is a cache, there is currently no maximum size. It may - * make sense to limit the size, or count collisions: If too many collisions - * occur, we could grow the cache, otherwise, just replace an old item that - * was presumably not used for a long time. 
- * - * If a different part of NumPy requires a custom hashtable, the code should - * be reused with care since specializing it more for the ufunc dispatching - * case is likely desired. - */ - -#include "npy_hashtable.h" - -#include -#include - -#include "templ_common.h" -#include - - - -#if SIZEOF_PY_UHASH_T > 4 -#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL) -#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)14029467366897019727ULL) -#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)2870177450012600261ULL) -#define _NpyHASH_XXROTATE(x) ((x << 31) | (x >> 33)) /* Rotate left 31 bits */ -#else -#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)2654435761UL) -#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)2246822519UL) -#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)374761393UL) -#define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ -#endif - -/* - * This hashing function is basically the Python tuple hash with the type - * identity hash inlined. The tuple hash itself is a reduced version of xxHash. - * - * Users cannot control pointers, so we do not have to worry about DoS attacks? - */ -static inline Py_hash_t -identity_list_hash(PyObject *const *v, int len) -{ - Py_uhash_t acc = _NpyHASH_XXPRIME_5; - for (int i = 0; i < len; i++) { - /* - * Lane is the single item hash, which for us is the rotated pointer. - * Identical to the python type hash (pointers end with 0s normally). 
- */ - size_t y = (size_t)v[i]; - Py_uhash_t lane = (y >> 4) | (y << (8 * SIZEOF_VOID_P - 4)); - acc += lane * _NpyHASH_XXPRIME_2; - acc = _NpyHASH_XXROTATE(acc); - acc *= _NpyHASH_XXPRIME_1; - } - return acc; -} -#undef _NpyHASH_XXPRIME_1 -#undef _NpyHASH_XXPRIME_2 -#undef _NpyHASH_XXPRIME_5 -#undef _NpyHASH_XXROTATE - - -static inline PyObject ** -find_item(PyArrayIdentityHash const *tb, PyObject *const *key) -{ - Py_hash_t hash = identity_list_hash(key, tb->key_len); - npy_uintp perturb = (npy_uintp)hash; - npy_intp bucket; - npy_intp mask = tb->size - 1 ; - PyObject **item; - - bucket = (npy_intp)hash & mask; - while (1) { - item = &(tb->buckets[bucket * (tb->key_len + 1)]); - - if (item[0] == NULL) { - /* The item is not in the cache; return the empty bucket */ - return item; - } - if (memcmp(item+1, key, tb->key_len * sizeof(PyObject *)) == 0) { - /* This is a match, so return the item/bucket */ - return item; - } - /* Hash collision, perturb like Python (must happen rarely!) */ - perturb >>= 5; /* Python uses the macro PERTURB_SHIFT == 5 */ - bucket = mask & (bucket * 5 + perturb + 1); - } -} - - -NPY_NO_EXPORT PyArrayIdentityHash * -PyArrayIdentityHash_New(int key_len) -{ - PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); - if (res == NULL) { - PyErr_NoMemory(); - return NULL; - } - - assert(key_len > 0); - res->key_len = key_len; - res->size = 4; /* Start with a size of 4 */ - res->nelem = 0; - - res->buckets = (PyObject **)PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); - if (res->buckets == NULL) { - PyErr_NoMemory(); - PyMem_Free(res); - return NULL; - } - -#ifdef Py_GIL_DISABLED - res->mutex = new(std::nothrow) std::shared_mutex(); - if (res->mutex == nullptr) { - PyErr_NoMemory(); - PyMem_Free(res); - return NULL; - } -#endif - return res; -} - - -NPY_NO_EXPORT void -PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) -{ - PyMem_Free(tb->buckets); -#ifdef Py_GIL_DISABLED - delete (std::shared_mutex 
*)tb->mutex; -#endif - PyMem_Free(tb); -} - - -static int -_resize_if_necessary(PyArrayIdentityHash *tb) -{ - npy_intp new_size, prev_size = tb->size; - PyObject **old_table = tb->buckets; - assert(prev_size > 0); - - if ((tb->nelem + 1) * 2 > prev_size) { - /* Double in size */ - new_size = prev_size * 2; - } - else { - new_size = prev_size; - while ((tb->nelem + 8) * 2 < new_size / 2) { - /* - * Should possibly be improved. However, we assume that we - * almost never shrink. Still if we do, do not shrink as much - * as possible to avoid growing right away. - */ - new_size /= 2; - } - assert(new_size >= 4); - } - if (new_size == prev_size) { - return 0; - } - - npy_intp alloc_size; - if (npy_mul_sizes_with_overflow(&alloc_size, new_size, tb->key_len + 1)) { - return -1; - } - tb->buckets = (PyObject **)PyMem_Calloc(alloc_size, sizeof(PyObject *)); - if (tb->buckets == NULL) { - tb->buckets = old_table; - PyErr_NoMemory(); - return -1; - } - - tb->size = new_size; - for (npy_intp i = 0; i < prev_size; i++) { - PyObject **item = &old_table[i * (tb->key_len + 1)]; - if (item[0] != NULL) { - PyObject **tb_item = find_item(tb, item + 1); - tb_item[0] = item[0]; - memcpy(tb_item+1, item+1, tb->key_len * sizeof(PyObject *)); - } - } - PyMem_Free(old_table); - return 0; -} - - -/** - * Add an item to the identity cache. The storage location must not change - * unless the cache is cleared. - * - * @param tb The mapping. - * @param key The key, must be a C-array of pointers of the length - * corresponding to the mapping. - * @param value Normally a Python object, no reference counting is done. - * use NULL to clear an item. If the item does not exist, no - * action is performed for NULL. - * @param replace If 1, allow replacements. If replace is 0 an error is raised - * if the stored value is different from the value to be cached. If the - * value to be cached is identical to the stored value, the value to be - * cached is ignored and no error is raised. 
- * @returns 0 on success, -1 with a MemoryError or RuntimeError (if an item - * is added which is already in the cache and replace is 0). The - * caller should avoid the RuntimeError. - */ -NPY_NO_EXPORT int -PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, - PyObject *const *key, PyObject *value, int replace) -{ - if (value != NULL && _resize_if_necessary(tb) < 0) { - /* Shrink, only if a new value is added. */ - return -1; - } - - PyObject **tb_item = find_item(tb, key); - if (value != NULL) { - if (tb_item[0] != NULL && tb_item[0] != value && !replace) { - PyErr_SetString(PyExc_RuntimeError, - "Identity cache already includes an item with this key."); - return -1; - } - tb_item[0] = value; - memcpy(tb_item+1, key, tb->key_len * sizeof(PyObject *)); - tb->nelem += 1; - } - else { - /* Clear the bucket -- just the value should be enough though. */ - memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); - } - - return 0; -} - - -NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) -{ - PyObject *res = find_item(tb, key)[0]; - return res; -} - -#ifdef Py_GIL_DISABLED - -NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key) -{ - PyObject *res; - std::shared_mutex *mutex = (std::shared_mutex *)tb->mutex; - NPY_BEGIN_ALLOW_THREADS - mutex->lock_shared(); - NPY_END_ALLOW_THREADS - res = find_item(tb, key)[0]; - mutex->unlock_shared(); - return res; -} - -#endif // Py_GIL_DISABLED diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index 02acc12d3191..a369ba1ba59b 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -11,26 +11,20 @@ extern "C" { #endif +struct buckets; + typedef struct { - int key_len; /* number of identities used */ - /* Buckets stores: val1, key1[0], key1[1], ..., val2, key2[0], ... 
*/ - PyObject **buckets; - npy_intp size; /* current size */ - npy_intp nelem; /* number of elements */ + int key_len; /* number of identities used */ + struct buckets *buckets; /* current buckets */ #ifdef Py_GIL_DISABLED - void *mutex; /* std::shared_mutex, prevents races to fill the cache */ + PyMutex mutex; #endif } PyArrayIdentityHash; NPY_NO_EXPORT int -PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, - PyObject *const *key, PyObject *value, int replace); - -#ifdef Py_GIL_DISABLED -NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key); -#endif // Py_GIL_DISABLED +PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result); NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c index a0308ff3e4c7..534d7b34020b 100644 --- a/numpy/_core/src/common/npy_import.c +++ b/numpy/_core/src/common/npy_import.c @@ -3,7 +3,7 @@ #include "numpy/ndarraytypes.h" #include "npy_import.h" -#include "npy_atomic.h" +#include NPY_VISIBILITY_HIDDEN npy_runtime_imports_struct npy_runtime_imports; @@ -60,3 +60,29 @@ npy_import_entry_point(const char *entry_point) { } return result; } + + +NPY_NO_EXPORT int +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { + if (!atomic_load_explicit((_Atomic(PyObject *) *)obj, memory_order_acquire)) { + PyObject* value = npy_import(module, attr); + if (value == NULL) { + return -1; + } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); +#else + PyMutex_Lock(&npy_runtime_imports.import_mutex); +#endif + if (!atomic_load_explicit((_Atomic(PyObject *) *)obj, memory_order_acquire)) { + atomic_store_explicit((_Atomic(PyObject *) *)obj, Py_NewRef(value), memory_order_release); + } +#if PY_VERSION_HEX < 0x30d00b3 + 
PyThread_release_lock(npy_runtime_imports.import_mutex); +#else + PyMutex_Unlock(&npy_runtime_imports.import_mutex); +#endif + Py_DECREF(value); + } + return 0; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index fec1b22f3975..7bf1bcd88831 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -4,7 +4,6 @@ #include #include "numpy/npy_common.h" -#include "npy_atomic.h" #ifdef __cplusplus extern "C" { @@ -50,8 +49,6 @@ typedef struct npy_runtime_imports_struct { PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; - PyObject *sort; - PyObject *argsort; } npy_runtime_imports_struct; NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; @@ -78,6 +75,19 @@ npy_import(const char *module, const char *attr) return ret; } +NPY_NO_EXPORT int +init_import_mutex(void); + +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point); + + /*! \brief Fetch and cache Python object at runtime. * * Import a Python function and cache it for use. The function checks if @@ -91,42 +101,8 @@ npy_import(const char *module, const char *attr) * @param attr module attribute to cache. * @param obj Storage location for imported function. 
*/ -static inline int -npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { - if (!npy_atomic_load_ptr(obj)) { - PyObject* value = npy_import(module, attr); - if (value == NULL) { - return -1; - } -#if PY_VERSION_HEX < 0x30d00b3 - PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); -#else - PyMutex_Lock(&npy_runtime_imports.import_mutex); -#endif - if (!npy_atomic_load_ptr(obj)) { - npy_atomic_store_ptr(obj, Py_NewRef(value)); - } -#if PY_VERSION_HEX < 0x30d00b3 - PyThread_release_lock(npy_runtime_imports.import_mutex); -#else - PyMutex_Unlock(&npy_runtime_imports.import_mutex); -#endif - Py_DECREF(value); - } - return 0; -} - NPY_NO_EXPORT int -init_import_mutex(void); - -/*! \brief Import a Python object from an entry point string. - - * The name should be of the form "(module ':')? (object '.')* attr". - * If no module is present, it is assumed to be "numpy". - * On error, returns NULL. - */ -NPY_NO_EXPORT PyObject* -npy_import_entry_point(const char *entry_point); +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj); #ifdef __cplusplus } diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 605833a511b7..52d44b17283a 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -34,7 +34,7 @@ } \ } #else -#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { do { (void)(original); } while (0) #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } #endif diff --git a/numpy/_core/src/common/npy_sort.c b/numpy/_core/src/common/npy_sort.c new file mode 100644 index 000000000000..632962e884dd --- /dev/null +++ b/numpy/_core/src/common/npy_sort.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include "npy_sort.h" +#include "dtypemeta.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +npy_default_sort_loop(PyArrayMethod_Context *context, 
+ char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_SortImpl *sort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + sort_func = npy_quicksort_impl; + break; + case NPY_SORT_STABLE: + sort_func = npy_mergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return sort_func(data[0], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +NPY_NO_EXPORT int +npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_ArgSortImpl *argsort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + argsort_func = npy_aquicksort_impl; + break; + case NPY_SORT_STABLE: + argsort_func = npy_amergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return argsort_func(data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +#ifdef __cplusplus +} +#endif diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index 1f82b07659f4..95d6f9d1ee70 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -5,6 +5,7 @@ #include #include #include +#include #define NPY_ENOMEM 1 #define NPY_ECOMP 2 @@ -107,6 +108,18 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, 
void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); +/* + ***************************************************************************** + ** NEW-STYLE GENERIC SORT ** + ***************************************************************************** + */ + +NPY_NO_EXPORT int npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); +NPY_NO_EXPORT int npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); /* ***************************************************************************** diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 90c06a4cae55..8636bccf29ad 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c +Subproject commit 8636bccf29adfa23463f810b3c2830f7cff1e933 diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp index e92d0eae9269..1049e97387f0 100644 --- a/numpy/_core/src/common/raii_utils.hpp +++ b/numpy/_core/src/common/raii_utils.hpp @@ -126,9 +126,11 @@ class SaveThreadState // // Instead of // +// Py_INCREF(descr); // npy_string_allocator *allocator = NpyString_acquire_allocator(descr); // [code that uses allocator] // NpyString_release_allocator(allocator); +// Py_DECREF(descr); // // use // @@ -139,16 +141,19 @@ class SaveThreadState // class NpyStringAcquireAllocator { + PyArray_StringDTypeObject *_descr; npy_string_allocator *_allocator; public: - NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) { - _allocator = NpyString_acquire_allocator(descr); + NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) : _descr(descr) { + Py_INCREF(_descr); + _allocator = 
NpyString_acquire_allocator(_descr); } ~NpyStringAcquireAllocator() { NpyString_release_allocator(_allocator); + Py_DECREF(_descr); } NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; diff --git a/numpy/_core/src/common/simd/lsx/memory.h b/numpy/_core/src/common/simd/lsx/memory.h index 9c3e6442c6d6..aaf32e5ce58f 100644 --- a/numpy/_core/src/common/simd/lsx/memory.h +++ b/numpy/_core/src/common/simd/lsx/memory.h @@ -528,7 +528,7 @@ NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_LSX_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index 40556a68c59d..cef7d0ed191f 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -39,7 +39,7 @@ // Indicates if the SIMD operations are available for float16. #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) -// Note: Highway requires SIMD extentions with native float32 support, so we don't need +// Note: Highway requires SIMD extensions with native float32 support, so we don't need // to check for it. // Indicates if the SIMD operations are available for float64. @@ -64,7 +64,7 @@ namespace simd { /// We can not import all the symbols from the HWY_NAMESPACE because it will /// conflict with the existing symbols in the numpy namespace. 
namespace hn = hwy::HWY_NAMESPACE; -// internaly used by the template header +// internally used by the template header template using _Tag = hn::ScalableTag; #endif diff --git a/numpy/_core/src/common/simd/vec/arithmetic.h b/numpy/_core/src/common/simd/vec/arithmetic.h index 85f4d6b26d68..8eccd491297a 100644 --- a/numpy/_core/src/common/simd/vec/arithmetic.h +++ b/numpy/_core/src/common/simd/vec/arithmetic.h @@ -286,7 +286,7 @@ NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) // divide each signed 64-bit element by a precomputed divisor (round towards zero) NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) { - npyv_b64 overflow = npyv_and_b64(vec_cmpeq(a, npyv_setall_s64(-1LL << 63)), (npyv_b64)divisor.val[1]); + npyv_b64 overflow = npyv_and_b64(vec_cmpeq(a, npyv_setall_s64(0x8000000000000000LL)), (npyv_b64)divisor.val[1]); npyv_s64 d = vec_sel(divisor.val[0], npyv_setall_s64(1), overflow); return vec_div(a, d); } diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index 0bcbea5baa30..1ed7165a4e83 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -105,7 +105,7 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * if (PyTuple_CheckExact(*out_kwd_obj)) { /* * The C-API recommends calling PySequence_Fast before any of the other - * PySequence_Fast* functions. This is required for PyPy + * PySequence_Fast* functions. 
*/ PyObject *seq; seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK diff --git a/numpy/_core/src/common/umathmodule.h b/numpy/_core/src/common/umathmodule.h index 73d853341cda..9fc693685e70 100644 --- a/numpy/_core/src/common/umathmodule.h +++ b/numpy/_core/src/common/umathmodule.h @@ -9,7 +9,6 @@ NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)); -PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args); PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)); diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 37c08e5528f6..ee36c8371293 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 37c08e5528f63ead9c7e4fd99ba454c1b1a3e3f7 +Subproject commit ee36c837129310be19c17c9108c6dc3f6ae06942 diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 5261e8232a08..c6fb57ee2837 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -335,4 +335,45 @@ datetime_hash(PyArray_DatetimeMetaData *meta, npy_datetime dt); NPY_NO_EXPORT npy_hash_t timedelta_hash(PyArray_DatetimeMetaData *meta, npy_timedelta td); +/* + * Scale a datetime or timedelta value by num/denom, checking for overflow. + * + * Positive values compute *dt * num / denom. + * Negative values compute (*dt * num - (denom - 1)) / denom to round + * toward negative infinity. + * + * NPY_DATETIME_NAT is NPY_MIN_INT64 (i.e. -NPY_MAX_INT64 - 1). + * The asymmetric neg_limit formula ensures that a valid *dt * num never + * produces NPY_MIN_INT64, which would be misinterpreted as NaT. + * + * NaT values pass through unchanged. + * + * Returns 0 on success, -1 on overflow (with PyExc_OverflowError set). 
+ */ +static inline int +_datetime_scale_with_overflow_check( + npy_int64 *dt, npy_int64 num, npy_int64 denom, + const char *type_name) +{ + if (*dt == NPY_DATETIME_NAT) { + return 0; + } + npy_int64 pos_limit = NPY_MAX_INT64 / num; + npy_int64 neg_limit = (NPY_MAX_INT64 - denom + 1) / num; + + if (*dt > pos_limit || *dt < -neg_limit) { + PyErr_Format(PyExc_OverflowError, + "Overflow when converting between " + "%s units", type_name); + return -1; + } + if (*dt < 0) { + *dt = (*dt * num - (denom - 1)) / denom; + } + else { + *dt = *dt * num / denom; + } + return 0; +} + #endif /* NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ */ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index b79908e1d5e4..b84868933ad6 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -33,11 +33,10 @@ argparse_example_function(PyObject *NPY_UNUSED(mod), int arg1; PyObject *arg2, *arg3, *arg4; if (npy_parse_arguments("func", args, len_args, kwnames, - "", &PyArray_PythonPyIntFromInt, &arg1, - "arg2", NULL, &arg2, - "|arg3", NULL, &arg3, - "$arg3", NULL, &arg4, - NULL, NULL, NULL) < 0) { + {"", &PyArray_PythonPyIntFromInt, &arg1}, + {"arg2", NULL, &arg2}, + {"|arg3", NULL, &arg3}, + {"$arg3", NULL, &arg4}) < 0) { return NULL; } Py_RETURN_NONE; @@ -57,9 +56,8 @@ threaded_argparse_example_function(PyObject *NPY_UNUSED(mod), int arg1; PyObject *arg2; if (npy_parse_arguments("thread_func", args, len_args, kwnames, - "$arg1", &PyArray_PythonPyIntFromInt, &arg1, - "$arg2", NULL, &arg2, - NULL, NULL, NULL) < 0) { + {"$arg1", &PyArray_PythonPyIntFromInt, &arg1}, + {"$arg2", NULL, &arg2}) < 0) { return NULL; } Py_RETURN_NONE; @@ -676,7 +674,7 @@ npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) return array; } -/* used to test WRITEBACKIFCOPY without resolution emits runtime warning */ +/* used to test WRITEBACKIFCOPY without resolution, emits 
runtime warning */ static PyObject* npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) { @@ -690,7 +688,7 @@ npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); if (array == NULL) return NULL; - Py_DECREF(array); /* calls array_dealloc even on PyPy */ + Py_DECREF(array); /* calls array_dealloc */ Py_RETURN_NONE; } @@ -923,36 +921,24 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } +static void +identity_cache_destructor(PyObject *capsule) +{ + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + PyArrayIdentityHash_Dealloc(tb); +} /* - * Helper to test the identity cache, takes a list of values and adds - * all to the cache except the last key/value pair. The last value is - * ignored, instead the last key is looked up. - * None is returned, if the key is not found. - * If `replace` is True, duplicate entries are ignored when adding to the - * hashtable. + * Create an identity hash table with the given key length and return it + * as a capsule. 
*/ static PyObject * -identityhash_tester(PyObject *NPY_UNUSED(mod), - PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +create_identity_hash(PyObject *NPY_UNUSED(self), PyObject *args) { - NPY_PREPARE_ARGPARSER; - int key_len; - int replace; - PyObject *replace_obj = Py_False; - PyObject *sequence; - PyObject *result = NULL; - if (npy_parse_arguments("identityhash_tester", args, len_args, kwnames, - "key_len", &PyArray_PythonPyIntFromInt, &key_len, - "sequence", NULL, &sequence, - "|replace", NULL, &replace_obj, - NULL, NULL, NULL) < 0) { - return NULL; - } - replace = PyObject_IsTrue(replace_obj); - if (error_converting(replace)) { + if (!PyArg_ParseTuple(args, "i", &key_len)) { return NULL; } @@ -960,52 +946,91 @@ identityhash_tester(PyObject *NPY_UNUSED(mod), PyErr_SetString(PyExc_ValueError, "must have 1 to max-args keys."); return NULL; } + PyArrayIdentityHash *tb = PyArrayIdentityHash_New(key_len); if (tb == NULL) { return NULL; } - /* Replace the sequence with a guaranteed fast-sequence */ - sequence = PySequence_Fast(sequence, "converting sequence."); // noqa: borrowed-ref OK - if (sequence == NULL) { - goto finish; + PyObject *capsule = PyCapsule_New((void *)tb, "PyArrayIdentityHash", + identity_cache_destructor); + if (capsule == NULL) { + PyArrayIdentityHash_Dealloc(tb); + return NULL; } - Py_ssize_t length = PySequence_Fast_GET_SIZE(sequence); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *key_val = PySequence_Fast_GET_ITEM(sequence, i); - if (!PyTuple_CheckExact(key_val) || PyTuple_GET_SIZE(key_val) != 2) { - PyErr_SetString(PyExc_TypeError, "bad key-value pair."); - goto finish; - } - PyObject *key = PyTuple_GET_ITEM(key_val, 0); - PyObject *value = PyTuple_GET_ITEM(key_val, 1); - if (!PyTuple_CheckExact(key) || PyTuple_GET_SIZE(key) != key_len) { - PyErr_SetString(PyExc_TypeError, "bad key tuple."); - goto finish; - } + return capsule; +} - PyObject *keys[NPY_MAXARGS]; - for (int j = 0; j < key_len; j++) { - keys[j] = 
PyTuple_GET_ITEM(key, j); - } - if (i != length - 1) { - if (PyArrayIdentityHash_SetItem(tb, keys, value, replace) < 0) { - goto finish; - } - } - else { - result = PyArrayIdentityHash_GetItem(tb, keys); - if (result == NULL) { - result = Py_None; - } - Py_INCREF(result); - } +/* + * Set default item in identity hash table provided as capsule and key as tuple. + * If the key is already present, return the existing value else set to value and + * return that. + */ +static PyObject * +identity_hash_set_item_default(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *capsule, *key_tuple, *value; + if (!PyArg_ParseTuple(args, "OOO", &capsule, &key_tuple, &value)) { + return NULL; } - finish: - Py_DECREF(sequence); - PyArrayIdentityHash_Dealloc(tb); + if (!PyCapsule_IsValid(capsule, "PyArrayIdentityHash")) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a valid PyArrayIdentityHash capsule."); + return NULL; + } + + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + + if (!PyTuple_CheckExact(key_tuple) || PyTuple_GET_SIZE(key_tuple) != tb->key_len) { + PyErr_Format(PyExc_TypeError, + "key must be a tuple of length %d", tb->key_len); + return NULL; + } + + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault(tb, &PyTuple_GET_ITEM(key_tuple, 0), value, &result) < 0) { + return NULL; + } + Py_INCREF(result); + return result; +} + + +/* + * Get item from identity hash table provided as capsule and key as tuple. 
+ */ +static PyObject * +identity_hash_get_item(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *capsule, *key_tuple; + + if (!PyArg_ParseTuple(args, "OO", &capsule, &key_tuple)) { + return NULL; + } + + if (!PyCapsule_IsValid(capsule, "PyArrayIdentityHash")) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a valid PyArrayIdentityHash capsule."); + return NULL; + } + + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + + if (!PyTuple_CheckExact(key_tuple) || PyTuple_GET_SIZE(key_tuple) != tb->key_len) { + PyErr_Format(PyExc_TypeError, + "key must be a tuple of length %d", tb->key_len); + return NULL; + } + + PyObject *result = PyArrayIdentityHash_GetItem(tb, &PyTuple_GET_ITEM(key_tuple, 0)); + if (result == NULL) { + Py_RETURN_NONE; + } + Py_INCREF(result); return result; } @@ -2309,9 +2334,15 @@ static PyMethodDef Multiarray_TestsMethods[] = { "Return a list with info on all available casts. 
Some of the info" "may differ for an actual cast if it uses value-based casting " "(flexible types)."}, - {"identityhash_tester", - (PyCFunction)identityhash_tester, - METH_KEYWORDS | METH_FASTCALL, NULL}, + {"create_identity_hash", + create_identity_hash, + METH_VARARGS, "Create a new PyArrayIdentityHash wrapped in a PyCapsule."}, + {"identity_hash_set_item_default", + (PyCFunction)identity_hash_set_item_default, + METH_VARARGS, "Set a default item in a PyArrayIdentityHash capsule."}, + {"identity_hash_get_item", + identity_hash_get_item, + METH_VARARGS, "Get an item from a PyArrayIdentityHash capsule."}, {"array_indexing", array_indexing, METH_VARARGS, NULL}, diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 8061feed24e5..386fcb086863 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -27,11 +27,17 @@ #endif #endif -/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN - * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN - * use-of-uninitialized-memory warnings less useful. */ + +/* + * CPython uses mimalloc on the free-threaded build, which we trust to cache + * allocations better than we can. + */ #ifdef Py_GIL_DISABLED # define USE_ALLOC_CACHE 0 +/* + * The cache makes ASAN use-after-free or MSAN use-of-uninitialized-memory + * warnings less useful. 
+ */ #elif defined(__has_feature) # if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) # define USE_ALLOC_CACHE 0 @@ -64,7 +70,7 @@ NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #ifdef NPY_OS_LINUX - if (npy_thread_unsafe_state.madvise_hugepage) { + if (npy_global_state.madvise_hugepage) { Py_RETURN_TRUE; } #endif @@ -82,12 +88,12 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) { - int was_enabled = npy_thread_unsafe_state.madvise_hugepage; + int was_enabled = npy_global_state.madvise_hugepage; int enabled = PyObject_IsTrue(enabled_obj); if (enabled < 0) { return NULL; } - npy_thread_unsafe_state.madvise_hugepage = enabled; + npy_global_state.madvise_hugepage = enabled; if (was_enabled) { Py_RETURN_TRUE; } @@ -100,7 +106,7 @@ indicate_hugepages(void *p, size_t size) { #ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ if (NPY_UNLIKELY(size >= ((1u<<22u))) && - npy_thread_unsafe_state.madvise_hugepage) { + npy_global_state.madvise_hugepage) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = size - offset; /** @@ -138,9 +144,6 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #endif p = alloc(nelem * esz); if (p) { -#ifdef _PyPyGC_AddMemoryPressure - _PyPyPyGC_AddMemoryPressure(nelem * esz); -#endif indicate_hugepages(p, nelem * esz); } return p; @@ -182,7 +185,6 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) { void * p; size_t sz = nmemb * size; - NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyDataMem_NEW); if (p) { @@ -190,9 +192,7 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) } return p; } - NPY_BEGIN_THREADS; p = PyDataMem_NEW_ZEROED(nmemb, size); - NPY_END_THREADS; if (p) { indicate_hugepages(p, sz); } @@ -220,7 +220,7 @@ 
npy_alloc_cache_dim(npy_uintp sz) sz = 2; } return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache, - &PyArray_malloc); + &PyMem_RawMalloc); } NPY_NO_EXPORT void @@ -261,10 +261,18 @@ PyDataMem_NEW(size_t size) void *result; assert(size != 0); - result = malloc(size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + result = PyMem_RawMalloc(size); + if (result == NULL) { + return NULL; + } + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { - free(result); + PyMem_RawFree(result); return NULL; } return result; @@ -278,10 +286,18 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) { void *result; - result = calloc(nmemb, size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + result = PyMem_RawCalloc(nmemb, size); + if (result == NULL) { + return NULL; + } + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); if (ret == -1) { - free(result); + PyMem_RawFree(result); return NULL; } return result; @@ -294,7 +310,7 @@ NPY_NO_EXPORT void PyDataMem_FREE(void *ptr) { PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); - free(ptr); + PyMem_RawFree(ptr); } /*NUMPY_API @@ -306,11 +322,21 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); - result = realloc(ptr, size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + + result = PyMem_RawRealloc(ptr, size); + if (result == NULL) { + // ptr is still valid here + return NULL; + } + int ret = PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + if (ret == -2) { + // tracemalloc is disabled + 
return result; + } + ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { - free(result); + PyMem_RawFree(result); return NULL; } return result; @@ -322,7 +348,16 @@ PyDataMem_RENEW(void *ptr, size_t size) static inline void * default_malloc(void *NPY_UNUSED(ctx), size_t size) { - return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &malloc); + void *result; + result = _npy_alloc_cache(size, 1, NBUCKETS, datacache, &PyMem_RawMalloc); + if (result == NULL) { + // alloc failed, nothing more to do + return NULL; + } + // untrack the allocation from tracemalloc + // ignore return value, since we'd early return either way + PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + return result; } // The default data mem allocator calloc routine does not make use of a ctx. @@ -333,20 +368,25 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) { void * p; size_t sz = nelem * elsize; - NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { - p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &malloc); - if (p) { - memset(p, 0, sz); + p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyMem_RawMalloc); + if (p == NULL) { + return NULL; + } + memset(p, 0, sz); + } else { + p = PyMem_RawCalloc(nelem, elsize); + if (p == NULL) { + return NULL; } - return p; - } - NPY_BEGIN_THREADS; - p = calloc(nelem, elsize); - if (p) { indicate_hugepages(p, sz); } - NPY_END_THREADS; + // untrack the allocation from tracemalloc + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)p); + if (ret == -2) { + // tracemalloc is disabled + return p; + } return p; } @@ -356,7 +396,19 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) static inline void * default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) { - return realloc(ptr, new_size); + void *result; + result = PyMem_RawRealloc(ptr, new_size); + if (result == NULL) { + // realloc failed, 
nothing more to do + return NULL; + } + // untrack the reallocation from tracemalloc + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + return result; } // The default data mem allocator free routine does not make use of a ctx. @@ -365,7 +417,7 @@ default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) static inline void default_free(void *NPY_UNUSED(ctx), void *ptr, size_t size) { - _npy_free_cache(ptr, size, NBUCKETS, datacache, &free); + _npy_free_cache(ptr, size, NBUCKETS, datacache, &PyMem_RawFree); } /* Memory handler global default */ @@ -399,6 +451,9 @@ PyDataMem_UserNEW(size_t size, PyObject *mem_handler) } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); + if (result == NULL) { + return NULL; + } int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { handler->allocator.free(handler->allocator.ctx, result, size); @@ -417,6 +472,9 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); + if (result == NULL) { + return NULL; + } int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); if (ret == -1) { handler->allocator.free(handler->allocator.ctx, result, size); @@ -436,6 +494,7 @@ PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler) "Could not get pointer to 'mem_handler' from PyCapsule"); return; } + // ignore -2 return when tracemalloc is disabled PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); handler->allocator.free(handler->allocator.ctx, ptr, size); } @@ -451,9 +510,17 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - int ret = 
PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (result == NULL) { + // ptr is still valid here + return NULL; + } + int ret = PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { handler->allocator.free(handler->allocator.ctx, result, size); return NULL; diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 2de639611bf6..fabb5695134e 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -920,10 +920,7 @@ PyArray_AdaptDescriptorToArray( return descr; } if (dtype == NULL) { - res = PyArray_ExtractDTypeAndDescriptor(descr, &new_descr, &dtype); - if (res < 0) { - return NULL; - } + PyArray_ExtractDTypeAndDescriptor(descr, &new_descr, &dtype); if (new_descr != NULL) { Py_DECREF(dtype); return new_descr; @@ -1034,7 +1031,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( } int was_copied_by__array__ = 0; arr = (PyArrayObject *)_array_from_array_like(obj, - requested_descr, 0, NULL, copy, &was_copied_by__array__); + requested_descr, 0, copy, &was_copied_by__array__); if (arr == NULL) { return -1; } @@ -1138,7 +1135,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: - /* Ensure we have a sequence (required for PyPy) */ + /* Ensure we have a sequence */ seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (seq == NULL) { /* @@ -1159,6 +1156,10 @@ PyArray_DiscoverDTypeAndShape_Recursive( return -1; } + int ret = -1; + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + npy_intp size = PySequence_Fast_GET_SIZE(seq); PyObject **objects = PySequence_Fast_ITEMS(seq); @@ -1166,17 +1167,19 @@ PyArray_DiscoverDTypeAndShape_Recursive( out_shape, 1, &size, NPY_TRUE, flags) < 0) { /* But do update, if there this 
is a ragged case */ *flags |= FOUND_RAGGED_ARRAY; - return max_dims; + ret = max_dims; + goto finish; } if (size == 0) { /* If the sequence is empty, this must be the last dimension */ *flags |= MAX_DIMS_WAS_REACHED; - return curr_dims + 1; + ret = curr_dims + 1; + goto finish; } /* Allow keyboard interrupts. See gh issue 18117. */ if (PyErr_CheckSignals() < 0) { - return -1; + goto finish; } /* @@ -1196,10 +1199,16 @@ PyArray_DiscoverDTypeAndShape_Recursive( flags, copy); if (max_dims < 0) { - return -1; + goto finish; } } - return max_dims; + ret = max_dims; + + finish:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + + return ret; } @@ -1408,9 +1417,8 @@ _discover_array_parameters(PyObject *NPY_UNUSED(self), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments( "_discover_array_parameters", args, len_args, kwnames, - "", NULL, &obj, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}) < 0) { /* fixed is last to parse, so never necessary to clean up */ return NULL; } diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 578e7b1554f4..27850491646b 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -83,7 +83,7 @@ array_converter_new( } else { item->array = (PyArrayObject *)PyArray_FromAny_int( - item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, + item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, &item->scalar_input); if (item->array == NULL) { goto fail; @@ -221,11 +221,10 @@ array_converter_as_arrays(PyArrayArrayConverterObject *self, scalar_policy policy = CONVERT_IF_NO_ARRAY; NPY_PREPARE_ARGPARSER; + /* pyscalars: how to handle scalars (ignored if dtype is given). */ if (npy_parse_arguments("as_arrays", args, len_args, kwnames, - "$subok", &PyArray_BoolConverter, &subok, - /* how to handle scalars (ignored if dtype is given). 
*/ - "$pyscalars", &pyscalar_mode_conv, &policy, - NULL, NULL, NULL) < 0) { + {"$subok", &PyArray_BoolConverter, &subok}, + {"$pyscalars", &pyscalar_mode_conv, &policy}) < 0) { return NULL; } if (policy == CONVERT_IF_NO_ARRAY) { @@ -286,11 +285,10 @@ array_converter_wrap(PyArrayArrayConverterObject *self, } NPY_PREPARE_ARGPARSER; + /* to_scalar is three-way "bool", if `None` inspect input to decide. */ if (npy_parse_arguments("wrap", args, len_args, kwnames, - "", NULL, &obj, - /* Three-way "bool", if `None` inspect input to decide. */ - "$to_scalar", NULL, &to_scalar, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"$to_scalar", NULL, &to_scalar}) < 0) { return NULL; } if (to_scalar == Py_None) { @@ -327,9 +325,8 @@ array_converter_result_type(PyArrayArrayConverterObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("result_type", args, len_args, kwnames, - "|extra_dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|ensure_inexact", &PyArray_BoolConverter, &ensure_inexact, - NULL, NULL, NULL) < 0) { + {"|extra_dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|ensure_inexact", &PyArray_BoolConverter, &ensure_inexact}) < 0) { goto finish; } diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index c7280435d3c3..ffedca11d3d6 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -86,7 +86,7 @@ default_resolve_descriptors( * abstract ones or unspecified outputs). We can use the common-dtype * operation to provide a default here. */ - if (method->casting == NPY_NO_CASTING) { + if (method->casting == NPY_NO_CASTING && (method->flags & _NPY_METH_IS_CAST)) { /* * By (current) definition no-casting should imply viewable. This * is currently indicated for example for object to object cast. 
@@ -129,9 +129,9 @@ is_contiguous( * param move_references UNUSED -- listed below but doxygen doesn't see as a parameter * @param strides Array of step sizes for each dimension of the arrays involved * @param out_loop Output pointer to the function that will perform the strided loop. - * @param out_transferdata Output pointer to auxiliary data (if any) + * @param out_transferdata Output pointer to auxiliary data (if any) * needed by the out_loop function. - * @param flags Output pointer to additional flags (if any) + * @param flags Output pointer to additional flags (if any) * needed by the out_loop function * @returns 0 on success -1 on failure. */ @@ -485,7 +485,12 @@ PyArrayMethod_FromSpec_int(PyArrayMethod_Spec *spec, int private) return NULL; } strcpy(res->method->name, spec->name); - +#ifdef Py_GIL_DISABLED + // Mark immortal to reduce reference count contention in PyArray_GetCastingImpl + // If we ever allow replacing ArrayMethod objects or cleanup it DTypes or ufuncs, this may need to be reconsidered. + // An alternative that might help is to store cast methods in a PyArrayIdentityHash instead of a dict. + PyUnstable_SetImmortal((PyObject *)res->method); +#endif return res; } @@ -567,7 +572,7 @@ boundarraymethod_dealloc(PyObject *self) * changes and especially testing if they were to be made public. 
*/ static PyObject * -boundarraymethod__resolve_descripors( +boundarraymethod__resolve_descriptors( PyBoundArrayMethodObject *self, PyObject *descr_tuple) { int nin = self->method->nin; @@ -957,7 +962,7 @@ PyArrayMethod_GetMaskedStridedLoop( PyMethodDef boundarraymethod_methods[] = { - {"_resolve_descriptors", (PyCFunction)boundarraymethod__resolve_descripors, + {"_resolve_descriptors", (PyCFunction)boundarraymethod__resolve_descriptors, METH_O, "Resolve the given dtypes."}, {"_simple_strided_call", (PyCFunction)boundarraymethod__simple_strided_call, METH_O, "call on 1-d inputs and pre-allocated outputs (single call)."}, diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 6f520fd6abbb..460b007d7a7d 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -358,80 +358,87 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self) /*********************** end C-API functions **********************/ -/* dealloc must not raise an error, best effort try to write - to stderr and clear the error -*/ - -static inline void -WARN_IN_DEALLOC(PyObject* warning, const char * msg) { - if (PyErr_WarnEx(warning, msg, 1) < 0) { - PyObject * s; - - s = PyUnicode_FromString("array_dealloc"); - if (s) { - PyErr_WriteUnraisable(s); - Py_DECREF(s); - } - else { - PyErr_WriteUnraisable(Py_None); - } +/* + * During dealloc we cannot propagate errors so if unraisable is set + * we simply print out the error message and convert the error into + * success (returning 0). + */ +static inline int +write_and_clear_error_if_unraisable(int status, npy_bool unraisable) +{ + if (status < 0 && unraisable) { + PyErr_WriteUnraisable(npy_interned_str.array_dealloc); + return 0; } + return status; } /* array object functions */ -static void -array_dealloc(PyArrayObject *self) +/* + * Much of the actual work for dealloc, split off for use in __setstate__ + * via clear_array_attributes function defined below. 
+ * If not unraisable, will return -1 on error, 0 on success. + * If unraisable, always succeeds, though may print errors and warnings. + */ +static int +_clear_array_attributes(PyArrayObject *self, npy_bool unraisable) { PyArrayObject_fields *fa = (PyArrayObject_fields *)self; if (_buffer_info_free(fa->_buffer_info, (PyObject *)self) < 0) { - PyErr_WriteUnraisable(NULL); + if (write_and_clear_error_if_unraisable(-1, unraisable) < 0) { + return -1; + } } + fa->_buffer_info = NULL; - if (fa->weakreflist != NULL) { - PyObject_ClearWeakRefs((PyObject *)self); - } if (fa->base) { - int retval; if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) { - char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. " + char const * msg = "WRITEBACKIFCOPY detected in clearing of array. " " Required call to PyArray_ResolveWritebackIfCopy or " "PyArray_DiscardWritebackIfCopy is missing."; + int retval = PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; + } /* * prevent reaching 0 twice and thus recursing into dealloc. * Increasing sys.gettotalrefcount, but path should not be taken. 
*/ Py_INCREF(self); - WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); retval = PyArray_ResolveWritebackIfCopy(self); - if (retval < 0) - { - PyErr_Print(); - PyErr_Clear(); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; } } /* * If fa->base is non-NULL, it is something * to DECREF -- either a view or a buffer object */ - Py_XDECREF(fa->base); + Py_CLEAR(fa->base); } if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) { /* Free any internal references */ if (PyDataType_REFCHK(fa->descr)) { if (PyArray_ClearArray(self) < 0) { - PyErr_WriteUnraisable(NULL); + if (write_and_clear_error_if_unraisable(-1, unraisable) < 0) { + return -1; + } } } + /* mem_handler can be absent if NPY_ARRAY_OWNDATA arbitrarily set */ if (fa->mem_handler == NULL) { - if (npy_thread_unsafe_state.warn_if_no_mem_policy) { + if (npy_global_state.warn_if_no_mem_policy) { char const *msg = "Trying to dealloc data, but a memory policy " "is not set. If you take ownership of the data, you must " "set a base owning the data (e.g. a PyCapsule)."; - WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); + int retval = PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; + } } // Guess at malloc/free ??? free(fa->data); @@ -442,16 +449,40 @@ array_dealloc(PyArrayObject *self) nbytes = 1; } PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler); - Py_DECREF(fa->mem_handler); + Py_CLEAR(fa->mem_handler); } + fa->data = NULL; } /* must match allocation in PyArray_NewFromDescr */ npy_free_cache_dim(fa->dimensions, 2 * fa->nd); - Py_DECREF(fa->descr); + fa->dimensions = NULL; + Py_CLEAR(fa->descr); + return 0; +} + +static void +array_dealloc(PyArrayObject *self) +{ + // NPY_TRUE flags that errors are unraisable. + int ret = _clear_array_attributes(self, NPY_TRUE); + // silence unused variable warning in release builds + (void)ret; + assert(ret == 0); // should always succeed if unraisable. 
+ // Only done on actual deallocation, nothing allocated by numpy. + if (((PyArrayObject_fields *)self)->weakreflist != NULL) { + PyObject_ClearWeakRefs((PyObject *)self); + } Py_TYPE(self)->tp_free((PyObject *)self); } +NPY_NO_EXPORT int +clear_array_attributes(PyArrayObject *self) +{ + // NPY_FALSE flags that errors can be raised. + return _clear_array_attributes(self, NPY_FALSE); +} + /*NUMPY_API * Prints the raw data of the ndarray in a form useful for debugging * low-level C issues. @@ -930,6 +961,9 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); + if (array_other == NULL) { + return NULL; + } if (PyArray_TYPE(array_other) == NPY_VOID) { /* * Void arrays are currently not handled by ufuncs, so if the other @@ -1232,7 +1266,7 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = { .tp_as_mapping = &array_as_mapping, .tp_str = (reprfunc)array_str, .tp_as_buffer = &array_as_buffer, - .tp_flags =(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE), + .tp_flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_SEQUENCE), .tp_richcompare = (richcmpfunc)array_richcompare, .tp_weaklistoffset = offsetof(PyArrayObject_fields, weakreflist), diff --git a/numpy/_core/src/multiarray/arrayobject.h b/numpy/_core/src/multiarray/arrayobject.h index 8d6f84faa6b1..d0f95dc228c7 100644 --- a/numpy/_core/src/multiarray/arrayobject.h +++ b/numpy/_core/src/multiarray/arrayobject.h @@ -19,6 +19,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op); NPY_NO_EXPORT int array_might_be_written(PyArrayObject *obj); +/* + * For use in __setstate__, where pickle gives us an instance on which we + * have to replace all the actual data. Returns 0 on success, -1 on error. + */ +NPY_NO_EXPORT int +clear_array_attributes(PyArrayObject *self); + /* * This flag is used to mark arrays which we would like to, in the future, * turn into views. 
It causes a warning to be issued on the first attempt to diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index d67bdd046c6d..54cd65e5ebcc 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4380,7 +4380,7 @@ static int /* Keeping Half macros consistent with standard C -Refernce: https://en.cppreference.com/w/c/types/limits.html +Reference: https://en.cppreference.com/w/c/types/limits.html */ #define HALF_MAX 31743 /* Bit pattern for 65504.0 */ #define HALF_MIN 1024 /* Bit pattern for smallest positive normal: 2^-14 */ @@ -4476,7 +4476,7 @@ static int /* Definition: Minimum negative integer such that FLT_RADIX raised by power one less than that integer is a normalized float, double and long double respectively - refernce: https://en.cppreference.com/w/c/types/limits.html + reference: https://en.cppreference.com/w/c/types/limits.html */ *(npy_intp *)ptr = @ABB@_MIN_EXP - 1; return 1; @@ -4716,7 +4716,7 @@ set_typeinfo(PyObject *dict) * #name = STRING, UNICODE, VOID# */ - PyDataType_MAKEUNSIZED(&@name@_Descr); + PyDataType_MAKEUNSIZED((PyArray_Descr *)&@name@_Descr); /**end repeat**/ diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index a6c683f26a8b..f0f4b6d7056c 100644 --- a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -30,7 +30,7 @@ * * - Don't add new members to ndarray or descr structs, to preserve binary * compatibility. (Also, adding the items is actually not very useful, - * since mutability issues prevent an 1 to 1 relationship between arrays + * since mutability issues prevent a one-to-one relationship between arrays * and buffer views.) * * - Don't use bf_releasebuffer, because it prevents PyArg_ParseTuple("s#", ... 
@@ -793,8 +793,10 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags) } /* Fill in information (and add it to _buffer_info if necessary) */ + Py_BEGIN_CRITICAL_SECTION(self); info = _buffer_get_info( &((PyArrayObject_fields *)self)->_buffer_info, obj, flags); + Py_END_CRITICAL_SECTION(); if (info == NULL) { goto fail; } @@ -880,7 +882,10 @@ void_getbuffer(PyObject *self, Py_buffer *view, int flags) * to find the correct format. This format must also be stored, since * at least in theory it can change (in practice it should never change). */ - _buffer_info_t *info = _buffer_get_info(&scalar->_buffer_info, self, flags); + _buffer_info_t *info = NULL; + Py_BEGIN_CRITICAL_SECTION(scalar); + info = _buffer_get_info(&scalar->_buffer_info, self, flags); + Py_END_CRITICAL_SECTION(); if (info == NULL) { Py_DECREF(self); return -1; diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index b95b37987f8e..6585982efec3 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -654,7 +654,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) op2 = n_ops.multiply; if (decimals == INT_MIN) { // not technically correct but it doesn't matter because no one in - // this millenium is using floating point numbers with enough + // this millennium is using floating point numbers with enough // accuracy for this to matter decimals = INT_MAX; } @@ -828,8 +828,8 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o NPY_NO_EXPORT PyObject * PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) { - if (PyArray_ISCOMPLEX(self) || PyArray_ISOBJECT(self) || - PyArray_ISUSERDEF(self)) { + if (NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->imag_meth != NULL) { + /* The dtype has `arr.imag` so `conjugate` must exist (or error) */ if (out == NULL) { return PyArray_GenericUnaryFunction(self, n_ops.conjugate); @@ -841,12 +841,14 @@ 
PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) } } else { - PyArrayObject *ret; - if (!PyArray_ISNUMBER(self)) { + if (!NPY_DT_is_numeric(NPY_DTYPE(PyArray_DTYPE(self)))) { PyErr_SetString(PyExc_TypeError, - "cannot conjugate non-numeric dtype"); + "cannot conjugate non-numeric dtype"); return NULL; } + + /* Numeric but no `.imag`: real-valued (or `.imag` should error) */ + PyArrayObject *ret; if (out) { if (PyArray_AssignArray(out, self, NULL, NPY_DEFAULT_ASSIGN_CASTING) < 0) { diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 2e9bcbf29e8f..954179c66cb3 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -381,7 +381,7 @@ _may_have_objects(PyArray_Descr *dtype) */ NPY_NO_EXPORT PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, - int nd, npy_intp dimensions[], int typenum, PyArrayObject **result) + int nd, npy_intp dimensions[], PyArray_Descr *descr, PyArrayObject **result) { PyArrayObject *out_buf; @@ -390,7 +390,7 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, /* verify that out is usable */ if (PyArray_NDIM(out) != nd || - PyArray_TYPE(out) != typenum || + !PyArray_EquivTypes(PyArray_DESCR(out), descr) || !PyArray_ISCARRAY(out)) { PyErr_SetString(PyExc_ValueError, "output array is not acceptable (must have the right datatype, " @@ -418,8 +418,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, /* set copy-back */ Py_INCREF(out); if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { - Py_DECREF(out); Py_DECREF(out_buf); + // PyArray_SetWritebackIfCopyBase steals reference to second argument return NULL; } } @@ -452,10 +452,11 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, subtype = Py_TYPE(ap1); } - out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? 
ap2 : ap1)); + Py_INCREF(descr); + out_buf = (PyArrayObject *)PyArray_NewFromDescr(subtype, descr, nd, dimensions, + NULL, NULL, 0, + (PyObject *) + (prior2 > prior1 ? ap2 : ap1)); if (out_buf != NULL && result) { Py_INCREF(out_buf); @@ -477,3 +478,61 @@ check_is_convertible_to_scalar(PyArrayObject *v) "only 0-dimensional arrays can be converted to Python scalars"); return -1; } + +NPY_NO_EXPORT PyObject * +build_array_interface(PyObject *dataptr, PyObject *descr, PyObject *strides, + PyObject *typestr, PyObject *shape) +{ + PyObject *inter = NULL; + PyObject *version = NULL; + int ret; + + inter = PyDict_New(); + if (inter == NULL) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "data", dataptr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "strides", strides); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "descr", descr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "typestr", typestr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "shape", shape); + if (ret < 0) { + goto fail; + } + + version = PyLong_FromLong(3); + if (version == NULL) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "version", version); + if (ret < 0) { + goto fail; + } + Py_XDECREF(version); + return inter; + + +fail: + Py_XDECREF(inter); + Py_XDECREF(version); + return NULL; + +} diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index db7bc64733db..f4d0e595aaa5 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -22,6 +22,10 @@ extern "C" { #define error_converting(x) (((x) == -1) && PyErr_Occurred()) +NPY_NO_EXPORT PyObject * +build_array_interface(PyObject *dataptr, PyObject *descr, PyObject *strides, + PyObject *typestr, PyObject *shape); + NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type); @@ -335,7 +339,7 @@ 
check_is_convertible_to_scalar(PyArrayObject *v); */ NPY_NO_EXPORT PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, - int nd, npy_intp dimensions[], int typenum, PyArrayObject **result); + int nd, npy_intp dimensions[], PyArray_Descr *descr, PyArrayObject **result); /* diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index e6a45554555f..ba68304a8082 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -123,10 +123,9 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("bincount", args, len_args, kwnames, - "list", NULL, &list, - "|weights", NULL, &weight, - "|minlength", NULL, &mlength, - NULL, NULL, NULL) < 0) { + {"list", NULL, &list}, + {"|weights", NULL, &weight}, + {"|minlength", NULL, &mlength}) < 0) { return NULL; } @@ -158,20 +157,7 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, lst = (PyArrayObject *)PyArray_FromAny((PyObject *)tmp1, local_dtype, 1, 1, flags, NULL); Py_DECREF(tmp1); if (lst == NULL) { - /* Failed converting to NPY_INTP. */ - if (PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - /* Deprecated 2024-08-02, NumPy 2.1 */ - if (DEPRECATE("Non-integer input passed to bincount. In a " - "future version of NumPy, this will be an " - "error. (Deprecated NumPy 2.1)") < 0) { - goto fail; - } - } - else { - /* Failure was not a TypeError. 
*/ - goto fail; - } + goto fail; } } else { @@ -566,12 +552,11 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t len_arg NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("interp", args, len_args, kwnames, - "x", NULL, &x, - "xp", NULL, &xp, - "fp", NULL, &fp, - "|left", NULL, &left, - "|right", NULL, &right, - NULL, NULL, NULL) < 0) { + {"x", NULL, &x}, + {"xp", NULL, &xp}, + {"fp", NULL, &fp}, + {"|left", NULL, &left}, + {"|right", NULL, &right}) < 0) { return NULL; } @@ -738,12 +723,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("interp_complex", args, len_args, kwnames, - "x", NULL, &x, - "xp", NULL, &xp, - "fp", NULL, &fp, - "|left", NULL, &left, - "|right", NULL, &right, - NULL, NULL, NULL) < 0) { + {"x", NULL, &x}, + {"xp", NULL, &xp}, + {"fp", NULL, &fp}, + {"|left", NULL, &left}, + {"|right", NULL, &right}) < 0) { return NULL; } @@ -1478,9 +1462,8 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("add_docstring", args, len_args, NULL, - "", NULL, &obj, - "", NULL, &str, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"", NULL, &str}) < 0) { return NULL; } if (!PyUnicode_Check(str)) { @@ -1628,9 +1611,29 @@ pack_inner(const char *inptr, #else npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + + #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + #if NPY_SIMD_WIDTH == 16 + arr[0] = npy_bswap8(arr[0]); + #elif NPY_SIMD_WIDTH == 32 + arr[0] = npy_bswap8(arr[0]); + arr[1] = npy_bswap8(arr[1]); + #else + arr[0] = npy_bswap8(arr[0]); + arr[1] = npy_bswap8(arr[1]); + arr[2] = npy_bswap8(arr[2]); + arr[3] = npy_bswap8(arr[3]); + #endif + #endif memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { + #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + bb[0] = npy_bswap8(bb[0]); + bb[1] = npy_bswap8(bb[1]); + bb[2] = npy_bswap8(bb[2]); + bb[3] = npy_bswap8(bb[3]); + #endif for(int i = 0; 
i < 4; i++) { for (int j = 0; j < vstep; j++) { memcpy(outptr, (char*)&bb[i] + j, 1); @@ -1645,6 +1648,11 @@ pack_inner(const char *inptr, va = npyv_rev64_u8(va); } npy_uint64 bb = npyv_tobits_b8(npyv_cmpneq_u8(va, v_zero)); + + #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + bb = npy_bswap8(bb); + #endif + for (int i = 0; i < vstep; ++i) { memcpy(outptr, (char*)&bb + i, 1); outptr += out_stride; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 164aa2e4c8b4..41405beef9a8 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -1332,20 +1332,6 @@ PyArray_TypestrConvert(int itemsize, int gentype) newtype = NPY_STRING; break; - case NPY_DEPRECATED_STRINGLTR2: - { - /* - * raise a deprecation warning, which might be an exception - * if warnings are errors, so leave newtype unset in that - * case - */ - int ret = DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead."); - if (ret == 0) { - newtype = NPY_STRING; - } - break; - } case NPY_UNICODELTR: newtype = NPY_UNICODE; break; diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index ccd883f2b0f4..841a873ce839 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -6,6 +6,7 @@ #include #include "npy_config.h" +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -24,6 +25,8 @@ #include "convert.h" #include "array_coercion.h" #include "refcount.h" +#include "getset.h" +#include "npy_static_data.h" #if defined(HAVE_FALLOCATE) && defined(__linux__) #include @@ -352,17 +355,17 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) || (PyArray_IS_F_CONTIGUOUS(self) && (order == NPY_FORTRANORDER))) { return PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); } - + /* Avoid Ravel where possible for fewer copies. 
*/ - if (!PyDataType_REFCHK(PyArray_DESCR(self)) && + if (!PyDataType_REFCHK(PyArray_DESCR(self)) && ((PyArray_DESCR(self)->flags & NPY_NEEDS_INIT) == 0)) { - + /* Allocate final Bytes Object */ ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); if (ret == NULL) { return NULL; } - + /* Writable Buffer */ char* dest = PyBytes_AS_STRING(ret); @@ -388,14 +391,14 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) Py_DECREF(ret); return NULL; } - + /* Copy directly from source to destination with proper ordering */ if (PyArray_CopyInto(dest_array, self) < 0) { Py_DECREF(dest_array); Py_DECREF(ret); return NULL; } - + Py_DECREF(dest_array); return ret; @@ -406,7 +409,7 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) if (contig == NULL) { return NULL; } - + ret = PyBytes_FromStringAndSize(PyArray_DATA(contig), numbytes); Py_DECREF(contig); return ret; @@ -545,26 +548,129 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) dtype = PyArray_DESCR(self); flags = PyArray_FLAGS(self); + if (type == NULL) { + /* No dtype change: just create the view */ + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, dtype, + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); + return (PyObject *)ret; + } + + /* + * Changing dtype on a subclass. We support three paths: + * + * 1. subclass overrides _set_dtype: create subclass view first, + * then call _set_dtype (subclass handles dtype change). + * 2. subclass overrides the dtype descriptor (e.g. property with + * setter): create subclass view first, use the setter, but + * emit a deprecation asking to implement _set_dtype instead. + * 3. Otherwise (including plain ndarray): create an ndarray base + * view, set dtype internally, then create the subclass view + * if needed. __array_finalize__ sees the final dtype+shape. 
+ */ + int use_set_dtype = 0; + int use_dtype_prop = 0; + + if (subtype != &PyArray_Type) { + PyObject *sub_set_dtype; + if (PyObject_GetOptionalAttr( + (PyObject *)subtype, + npy_interned_str._set_dtype, &sub_set_dtype) < 0) { + goto finish; + } + use_set_dtype = (sub_set_dtype != NULL && + sub_set_dtype != npy_static_pydata.ndarray_set_dtype); + Py_XDECREF(sub_set_dtype); + + if (!use_set_dtype) { + PyObject *sub_dtype_descr; + if (PyObject_GetOptionalAttr( + (PyObject *)subtype, + npy_interned_str.dtype, &sub_dtype_descr) < 0) { + goto finish; + } + use_dtype_prop = (sub_dtype_descr != NULL && + sub_dtype_descr != npy_static_pydata.ndarray_dtype_descr && + Py_TYPE(sub_dtype_descr)->tp_descr_set != NULL); + Py_XDECREF(sub_dtype_descr); + } + } + + if (use_set_dtype || use_dtype_prop) { + /* + * Paths 1 & 2: create subclass view with original dtype, + * then let the subclass handle the dtype change. + */ + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, dtype, + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); + if (ret == NULL) { + goto finish; + } + if (use_set_dtype) { + PyObject *res = PyObject_CallMethodOneArg( + (PyObject *)ret, + npy_interned_str._set_dtype, (PyObject *)type); + if (res == NULL) { + Py_CLEAR(ret); + goto finish; + } + Py_DECREF(res); + } + else { + if (PyObject_GenericSetAttr( + (PyObject *)ret, npy_interned_str.dtype, + (PyObject *)type) < 0) { + Py_CLEAR(ret); + goto finish; + } + /* DEPRECATED 2026-04-13, NumPy 2.5 */ + if (DEPRECATE( + "numpy.ndarray.view() used a custom `dtype` setter " + "to change the dtype of the view. 
Subclasses should " + "implement `_set_dtype` instead.") < 0) { + Py_CLEAR(ret); + goto finish; + } + } + goto finish; + } + + /* Path 3: create ndarray base view and set dtype internally */ Py_INCREF(dtype); ret = (PyArrayObject *)PyArray_NewFromDescr_int( - subtype, dtype, - PyArray_NDIM(self), PyArray_DIMS(self), PyArray_STRIDES(self), - PyArray_DATA(self), + &PyArray_Type, dtype, + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_DATA(self), flags, (PyObject *)self, (PyObject *)self, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); if (ret == NULL) { - Py_XDECREF(type); - return NULL; + goto finish; + } + if (array_descr_set_internal(ret, (PyObject *)type) < 0) { + Py_CLEAR(ret); + goto finish; } - if (type != NULL) { - if (PyObject_SetAttrString((PyObject *)ret, "dtype", - (PyObject *)type) < 0) { - Py_DECREF(ret); - Py_DECREF(type); - return NULL; - } - Py_DECREF(type); + if (subtype != &PyArray_Type) { + Py_INCREF(PyArray_DESCR(ret)); + Py_SETREF(ret, (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, PyArray_DESCR(ret), + PyArray_NDIM(ret), PyArray_DIMS(ret), + PyArray_STRIDES(ret), PyArray_DATA(ret), + PyArray_FLAGS(ret), (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY)); } + +finish: + Py_DECREF(type); return (PyObject *)ret; } diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index dbab8b4253d8..f404faab022a 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -232,6 +232,7 @@ PyArray_GetBoundCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) res->dtypes = PyMem_Malloc(2 * sizeof(PyArray_DTypeMeta *)); if (res->dtypes == NULL) { Py_DECREF(res); + PyErr_NoMemory(); return NULL; } Py_INCREF(from); @@ -669,7 +670,6 @@ dtype_kind_to_ordering(char kind) return 5; /* String kind */ case 'S': - case 'a': return 6; /* Unicode kind */ case 'U': @@ -931,7 +931,7 @@ PyArray_CastDescrToDType(PyArray_Descr 
*descr, PyArray_DTypeMeta *given_DType) Py_INCREF(descr); return descr; } - if (!NPY_DT_is_parametric(given_DType)) { + if (!NPY_DT_is_parametric(given_DType) && !NPY_DT_is_abstract(given_DType)) { /* * Don't actually do anything, the default is always the result * of any cast. @@ -994,10 +994,8 @@ PyArray_FindConcatenationDescriptor( PyArray_DTypeMeta *common_dtype; PyArray_Descr *result = NULL; - if (PyArray_ExtractDTypeAndDescriptor( - requested_dtype, &result, &common_dtype) < 0) { - return NULL; - } + PyArray_ExtractDTypeAndDescriptor( + requested_dtype, &result, &common_dtype); if (result != NULL) { if (PyDataType_SUBARRAY(result) != NULL) { PyErr_Format(PyExc_TypeError, @@ -1817,7 +1815,7 @@ PyArray_Zero(PyArrayObject *arr) /* XXX this is dangerous, the caller probably is not aware that zeroval is actually a static PyObject* In the best case they will only use it as-is, but - if they simply memcpy it into a ndarray without using + if they simply memcpy it into an ndarray without using setitem(), refcount errors will occur */ memcpy(zeroval, &npy_static_pydata.zero_obj, sizeof(PyObject *)); @@ -1856,7 +1854,7 @@ PyArray_One(PyArrayObject *arr) /* XXX this is dangerous, the caller probably is not aware that oneval is actually a static PyObject* In the best case they will only use it as-is, but - if they simply memcpy it into a ndarray without using + if they simply memcpy it into an ndarray without using setitem(), refcount errors will occur */ memcpy(oneval, &npy_static_pydata.one_obj, sizeof(PyObject *)); @@ -1938,14 +1936,21 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn) PyArray_Descr *common_descr = NULL; PyArrayObject **mps = NULL; - *retn = n = PySequence_Length(op); - if (n == 0) { + Py_ssize_t length = PySequence_Length(op); + if (length == 0) { PyErr_SetString(PyExc_ValueError, "0-length sequence."); } if (PyErr_Occurred()) { *retn = 0; return NULL; } + if (length > INT_MAX) { + PyErr_SetString(PyExc_ValueError, + "sequence too large to 
convert in common type."); + *retn = 0; + return NULL; + } + *retn = n = (int)length; mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *)); if (mps == NULL) { *retn = 0; @@ -2091,6 +2096,7 @@ PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private) if (meth == NULL) { return -1; } + meth->method->flags |= _NPY_METH_IS_CAST; int res = PyArray_AddCastingImplementation(meth); Py_DECREF(meth); if (res < 0) { diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 3a43e1bd983b..49b83d7c9e79 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -493,33 +493,49 @@ NPY_NO_EXPORT int PyArray_AssignFromCache_Recursive( PyArrayObject *self, const int ndim, coercion_cache_obj **cache) { + int ret = -1; /* Consume first cache element by extracting information and freeing it */ PyObject *obj = (*cache)->arr_or_sequence; Py_INCREF(obj); - npy_bool sequence = (*cache)->sequence; + npy_bool is_sequence = (*cache)->sequence; + /* + If it is a sequence, this object is the argument to PySequence_Fast, e.g. + the iterable that the user wants to coerce into an array + */ + PyObject *orig_seq = (*cache)->converted_obj; + /* Owned reference to an item in the sequence */ + PyObject *item_pyvalue = NULL; int depth = (*cache)->depth; *cache = npy_unlink_coercion_cache(*cache); - /* The element is either a sequence, or an array */ - if (!sequence) { + /* The element is either a sequence or an array */ + if (!is_sequence) { /* Straight forward array assignment */ assert(PyArray_Check(obj)); if (PyArray_CopyInto(self, (PyArrayObject *)obj) < 0) { - goto fail; + goto finish; } } else { assert(depth != ndim); - npy_intp length = PySequence_Length(obj); - if (length != PyArray_DIMS(self)[0]) { - PyErr_SetString(PyExc_RuntimeError, - "Inconsistent object during array creation? 
" - "Content of sequences changed (length inconsistent)."); - goto fail; - } - - for (npy_intp i = 0; i < length; i++) { - PyObject *value = PySequence_Fast_GET_ITEM(obj, i); + npy_intp orig_length = PyArray_DIMS(self)[0]; + int err = 1; + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(orig_seq); + for (npy_intp i = 0; i < orig_length; i++) { + // this macro takes *the argument* of PySequence_Fast, which is orig_seq; + // not the object returned by PySequence_Fast, which is a proxy object + // with its own per-object PyMutex lock. + // We want to lock the list object exposed to users, not the proxy. + npy_intp length = PySequence_Fast_GET_SIZE(obj); + if (length != orig_length) { + PyErr_SetString(PyExc_RuntimeError, + "Inconsistent object during array creation? " + "Content of sequences changed (length inconsistent)."); + goto finish_critical_section; + } + else { + Py_XSETREF(item_pyvalue, Py_NewRef(PySequence_Fast_GET_ITEM(obj, i))); + } if (ndim == depth + 1) { /* @@ -532,11 +548,11 @@ PyArray_AssignFromCache_Recursive( */ char *item; item = (PyArray_BYTES(self) + i * PyArray_STRIDES(self)[0]); - if (PyArray_Pack(PyArray_DESCR(self), item, value) < 0) { - goto fail; + if (PyArray_Pack(PyArray_DESCR(self), item, item_pyvalue) < 0) { + goto finish_critical_section; } /* If this was an array(-like) we still need to unlike int: */ - if (*cache != NULL && (*cache)->converted_obj == value) { + if (*cache != NULL && (*cache)->converted_obj == item_pyvalue) { *cache = npy_unlink_coercion_cache(*cache); } } @@ -544,22 +560,30 @@ PyArray_AssignFromCache_Recursive( PyArrayObject *view; view = (PyArrayObject *)array_item_asarray(self, i); if (view == NULL) { - goto fail; + goto finish_critical_section; } if (PyArray_AssignFromCache_Recursive(view, ndim, cache) < 0) { Py_DECREF(view); - goto fail; + goto finish_critical_section; } Py_DECREF(view); } } + err = 0; + finish_critical_section:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + if (err) { + goto finish; + } + } - 
Py_DECREF(obj); - return 0; + ret = 0; - fail: + finish:; + Py_XDECREF(item_pyvalue); Py_DECREF(obj); - return -1; + return ret; } @@ -1036,7 +1060,7 @@ PyArray_NewFromDescrAndBase( * NPY_KEEPORDER - Keeps the axis ordering of prototype. * descr - If not NULL, overrides the data type of the result. * dtype - If not NULL and if descr is NULL, overrides the data type - of the result, so long as dtype is non-parameteric + of the result, so long as dtype is non-parametric * ndim - If not -1, overrides the shape of the result. * dims - If ndim is not -1, overrides the shape of the result. * subok - If 1, use the prototype's array subtype, otherwise @@ -1405,7 +1429,6 @@ _array_from_buffer_3118(PyObject *memoryview) * @param requested_dtype a requested dtype instance, may be NULL; The result * DType may be used, but is not enforced. * @param writeable whether the result must be writeable. - * @param context Unused parameter, must be NULL (should be removed later). * @param copy Specifies the copy behavior. * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy * was made by implementor. 
@@ -1416,7 +1439,7 @@ _array_from_buffer_3118(PyObject *memoryview) */ NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, - PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, + PyArray_Descr *requested_dtype, npy_bool writeable, int copy, int *was_copied_by__array__) { PyObject* tmp; @@ -1496,19 +1519,19 @@ NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) { + if (context != NULL) { + PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); + Py_XDECREF(newtype); + return NULL; + } + npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( newtype, &dt_info.descr, &dt_info.dtype); Py_XDECREF(newtype); - if (res < 0) { - Py_XDECREF(dt_info.descr); - Py_XDECREF(dt_info.dtype); - return NULL; - } - /* * The internal implementation treats 0 as actually wanting a zero-dimensional * array, but the API for this function has typically treated it as @@ -1522,7 +1545,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int was_scalar; PyObject* ret = PyArray_FromAny_int( op, dt_info.descr, dt_info.dtype, - min_depth, max_depth, flags, context, &was_scalar); + min_depth, max_depth, flags, &was_scalar); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); @@ -1544,23 +1567,29 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, - int flags, PyObject *context, int *was_scalar) + int flags, int *was_scalar) { /* * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. */ + + /* + * Fast path: op is already an ndarray with no dtype, flags, or depth + * constraints. Avoids DiscoverDTypeAndShape + PyArray_CanCastArrayTo. 
+ */ + if (in_descr == NULL && in_DType == NULL && flags == 0 + && min_depth == 0 && PyArray_Check(op)) { + *was_scalar = 0; + return Py_NewRef(op); + } + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; npy_intp dims[NPY_MAXDIMS]; - if (context != NULL) { - PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); - return NULL; - } - // Default is copy = None int copy = -1; int was_copied_by__array__ = 0; @@ -1571,8 +1600,6 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, copy = 1; } - Py_BEGIN_CRITICAL_SECTION(op); - ndim = PyArray_DiscoverDTypeAndShape( op, max_depth, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); @@ -1741,7 +1768,6 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, cleanup:; Py_XDECREF(dtype); - Py_END_CRITICAL_SECTION(); return (PyObject *)ret; } @@ -1796,27 +1822,26 @@ NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requirements, PyObject *context) { + if (context != NULL) { + PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); + Py_XDECREF(descr); + return NULL; + } + npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( descr, &dt_info.descr, &dt_info.dtype); Py_XDECREF(descr); - if (res < 0) { - Py_XDECREF(dt_info.descr); - Py_XDECREF(dt_info.dtype); - return NULL; - } - /* See comment in PyArray_FromAny for rationale */ if (max_depth == 0 || max_depth > NPY_MAXDIMS) { max_depth = NPY_MAXDIMS; } PyObject* ret = PyArray_CheckFromAny_int( - op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements, - context); + op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); @@ -1830,7 +1855,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, NPY_NO_EXPORT PyObject * 
PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requirements, PyObject *context) + int max_depth, int requirements) { PyObject *obj; Py_XINCREF(in_descr); /* take ownership as we may replace it */ @@ -1849,7 +1874,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, - max_depth, requirements, context, &was_scalar); + max_depth, requirements, &was_scalar); Py_XDECREF(in_descr); if (obj == NULL) { return NULL; @@ -1860,6 +1885,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyObject *ret; if (requirements & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(obj); return NULL; } ret = PyArray_NewCopy((PyArrayObject *)obj, NPY_ANYORDER); @@ -2980,18 +3006,12 @@ PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order) { npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( type, &dt_info.descr, &dt_info.dtype); // steal reference Py_XDECREF(type); - if (res < 0) { - Py_XDECREF(dt_info.descr); - Py_XDECREF(dt_info.dtype); - return NULL; - } - PyObject *ret = PyArray_Zeros_int(nd, dims, dt_info.descr, dt_info.dtype, is_f_order); @@ -3045,16 +3065,12 @@ PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order) { npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( type, &dt_info.descr, &dt_info.dtype); // steal reference Py_XDECREF(type); - if (res < 0) { - return NULL; - } - PyObject *ret = PyArray_Empty_int( nd, dims, dt_info.descr, dt_info.dtype, is_f_order); @@ -3605,6 +3621,10 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nre thisbuf += 1; dptr += dtype->elsize; if (num < 0 && thisbuf == size) { + if (totalbytes > 
NPY_MAX_INTP - bytes) { + err = 1; + break; + } totalbytes += bytes; /* The handler is always valid */ tmp = PyDataMem_UserRENEW(PyArray_DATA(r), totalbytes, diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index b7a60e0065e0..7e3e9f6587bb 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -53,13 +53,13 @@ PyArray_New( NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, - PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, + PyArray_Descr *requested_dtype, npy_bool writeable, int copy, int *was_copied_by__array__); NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, - int flags, PyObject *context, int *was_scalar); + int flags, int *was_scalar); NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, @@ -68,7 +68,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requirements, PyObject *context); + int max_depth, int requirements); NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9489e2b92c6a..26e78f6301d2 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -444,8 +444,16 @@ NpyDatetime_ConvertDatetime64ToDatetimeStruct( return -1; } - /* TODO: Change to a mechanism that avoids the potential overflow */ - dt *= meta->num; + /* Check for overflow and apply meta->num scaling */ + if (meta->num > 1) { + if (_datetime_scale_with_overflow_check( + &dt, (npy_int64)meta->num, 1, "datetime64") < 0) { + return -1; + } + } + else { + dt *= meta->num; + } /* * Note that care must be 
taken with the / and % operators @@ -2359,6 +2367,16 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, return -1; } + if(meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." + "Please use a specific unit instead.") < 0) { + return -1; + } + } + Py_DECREF(utf8); return 0; } @@ -2484,6 +2502,17 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, meta->num = 1; } *out = NPY_DATETIME_NAT; + + if(meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." + "Please use a specific unit instead.") < 0) { + return -1; + } + } + return 0; } else { @@ -2559,6 +2588,16 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, meta->base = NPY_FR_GENERIC; meta->num = 1; } + /* If output is NaT, skip this warning. */ + if(meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." + "Please use a specific unit instead.") < 0) { + return -1; + } + } return 0; } @@ -2575,6 +2614,17 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, if (error_converting(*out)) { return -1; } + + if (meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." 
+ "Please use a specific unit instead.") < 0) { + return -1; + } + } + return 0; } /* Timedelta scalar */ @@ -3157,13 +3207,11 @@ cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, return -1; } - /* Apply the scaling */ - if (src_dt < 0) { - *dst_dt = (src_dt * num - (denom - 1)) / denom; - } - else { - *dst_dt = src_dt * num / denom; + /* Apply the scaling, checking for overflow */ + if (_datetime_scale_with_overflow_check(&src_dt, num, denom, "timedelta64") < 0) { + return -1; } + *dst_dt = src_dt; return 0; } @@ -4071,6 +4119,9 @@ time_to_string_resolve_descriptors( if (loop_descrs[1] == NULL) { return -1; } + if (given_descrs[1] != NULL) { + size = (size < given_descrs[1]->elsize) ? size : given_descrs[1]->elsize; + } loop_descrs[1]->elsize = size; } diff --git a/numpy/_core/src/multiarray/datetime_busday.c b/numpy/_core/src/multiarray/datetime_busday.c index 73c88811a0a9..ad4c66be0e1b 100644 --- a/numpy/_core/src/multiarray/datetime_busday.c +++ b/numpy/_core/src/multiarray/datetime_busday.c @@ -1281,8 +1281,7 @@ array_is_busday(PyObject *NPY_UNUSED(self), else { PyArray_Descr *datetime_dtype; - /* Use the datetime dtype with generic units so it fills it in */ - datetime_dtype = PyArray_DescrFromType(NPY_DATETIME); + datetime_dtype = create_datetime_dtype_with_unit(NPY_DATETIME, NPY_FR_D); if (datetime_dtype == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/datetime_busdaycal.c b/numpy/_core/src/multiarray/datetime_busdaycal.c index 3a7e3a383dca..9d1e4e90c202 100644 --- a/numpy/_core/src/multiarray/datetime_busdaycal.c +++ b/numpy/_core/src/multiarray/datetime_busdaycal.c @@ -159,15 +159,15 @@ PyArray_WeekMaskConverter(PyObject *weekmask_in, npy_bool *weekmask) int i; for (i = 0; i < 7; ++i) { - long val; + int val; PyObject *f = PySequence_GetItem(obj, i); if (f == NULL) { Py_DECREF(obj); return 0; } - val = PyLong_AsLong(f); - if (error_converting(val)) { + val = PyObject_IsTrue(f); + if (val == -1) { Py_DECREF(f); Py_DECREF(obj); 
return 0; @@ -283,8 +283,7 @@ PyArray_HolidaysConverter(PyObject *dates_in, npy_holidayslist *holidays) else { PyArray_Descr *datetime_dtype; - /* Use the datetime dtype with generic units so it fills it in */ - datetime_dtype = PyArray_DescrFromType(NPY_DATETIME); + datetime_dtype = create_datetime_dtype_with_unit(NPY_DATETIME, NPY_FR_D); if (datetime_dtype == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 1fc5b76d1f00..a347ff4cca52 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -413,6 +413,7 @@ _convert_from_array_descr(PyObject *obj, int align) int totalsize = 0; PyObject *fields = PyDict_New(); if (!fields) { + Py_DECREF(nameslist); return NULL; } for (int i = 0; i < n; i++) { @@ -844,7 +845,7 @@ _try_convert_from_inherit_tuple(PyArray_Descr *type, PyObject *newobj) return (PyArray_Descr *)Py_NotImplemented; } if (!PyDataType_ISLEGACY(type) || !PyDataType_ISLEGACY(conv)) { - /* + /* * This specification should probably be never supported, but * certainly not for new-style DTypes. */ @@ -1412,8 +1413,9 @@ descr_is_legacy_parametric_instance(PyArray_Descr *descr, } /* Flexible descr with generic time unit (which can be adapted) */ if (PyDataType_ISDATETIME(descr)) { - PyArray_DatetimeMetaData *meta; - meta = get_datetime_metadata_from_dtype(descr); + _PyArray_LegacyDescr *ldescr = (_PyArray_LegacyDescr *)descr; + PyArray_DatetimeMetaData *meta = + &(((PyArray_DatetimeDTypeMetaData *)ldescr->c_metadata)->meta); if (meta->base == NPY_FR_GENERIC) { return 1; } @@ -1428,12 +1430,13 @@ descr_is_legacy_parametric_instance(PyArray_Descr *descr, * both results can be NULL (if the input is). But it always sets the DType * when a descriptor is set. * + * This function cannot fail. 
+ * * @param dtype Input descriptor to be converted * @param out_descr Output descriptor * @param out_DType DType of the output descriptor - * @return 0 on success -1 on failure */ -NPY_NO_EXPORT int +NPY_NO_EXPORT void PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, PyArray_Descr **out_descr, PyArray_DTypeMeta **out_DType) { @@ -1449,7 +1452,6 @@ PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, Py_INCREF(*out_descr); } } - return 0; } @@ -1493,12 +1495,8 @@ PyArray_DTypeOrDescrConverterRequired(PyObject *obj, npy_dtype_info *dt_info) * be considered an instance with actual 0 length. * TODO: It would be nice to fix that eventually. */ - int res = PyArray_ExtractDTypeAndDescriptor( - descr, &dt_info->descr, &dt_info->dtype); + PyArray_ExtractDTypeAndDescriptor(descr, &dt_info->descr, &dt_info->dtype); Py_DECREF(descr); - if (res < 0) { - return NPY_FAIL; - } return NPY_SUCCEED; } @@ -1829,14 +1827,6 @@ _convert_from_str(PyObject *obj, int align) check_num = NPY_STRING; break; - case NPY_DEPRECATED_STRINGLTR2: - if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead.") < 0) { - return NULL; - } - check_num = NPY_STRING; - break; - /* * When specifying length of UNICODE * the number of characters is given to match @@ -1907,13 +1897,6 @@ _convert_from_str(PyObject *obj, int align) goto fail; } - if (strcmp(type, "a") == 0) { - if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead.") < 0) { - return NULL; - } - } - /* * Probably only ever dispatches to `_convert_from_type`, but who * knows what users are injecting into `np.typeDict`. @@ -1966,7 +1949,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base_descr) { if (!PyDataType_ISLEGACY(base_descr)) { - /* + /* * The main use of this function is mutating strings, so probably * disallowing this is fine in practice. 
*/ @@ -2104,7 +2087,7 @@ arraydescr_subdescr_get(PyArray_Descr *self, void *NPY_UNUSED(ignored)) NPY_NO_EXPORT PyObject * arraydescr_protocol_typestr_get(PyArray_Descr *self, void *NPY_UNUSED(ignored)) { - if (!PyDataType_ISLEGACY(NPY_DTYPE(self))) { + if (!PyDataType_ISLEGACY(self)) { return (PyObject *) Py_TYPE(self)->tp_str((PyObject *)self); } @@ -2544,6 +2527,7 @@ arraydescr_new(PyTypeObject *subtype, PyObject *odescr; PyObject *oalign = NULL; + PyObject *ocopy = NULL; PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; @@ -2552,21 +2536,36 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OOO!:dtype", kwlist, &odescr, &oalign, - PyArray_BoolConverter, ©, + &ocopy, &PyDict_Type, &metadata)) { return NULL; } + if (ocopy != NULL && !PyArray_BoolConverter(ocopy, ©)) { + return NULL; + } if (oalign != NULL) { /* * In the future, reject non Python (or NumPy) boolean, including integers to avoid any * possibility of thinking that an integer alignment makes sense here. + * We omit the case of `oalign == 0` and `ocopy == 1` if there are exact ints. + * This can fail, in which case res is -1 and we enter the deprecation path. */ - if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + int res = 0; + int overflow; + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool) && !( + // Some old pickles use 0, 1 exactly, assume no user passes it + // (It may also be possible to use `copyreg` instead.) 
+ PyLong_CheckExact(oalign) && (res = PyLong_IsZero(oalign)) == 1 && + ocopy != NULL && PyLong_CheckExact(ocopy) && + (res = PyLong_AsLongAndOverflow(ocopy, &overflow)) == 1)) { /* Deprecated 2025-07-01: NumPy 2.4 */ + if (res == -1 && PyErr_Occurred()) { + return NULL; // Should actually be impossible (as inputs are `long`) + } if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. " "Did you mean to pass a tuple to create a subarray type? (Deprecated NumPy 2.4)", @@ -2932,13 +2931,10 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } break; default: - /* raise an error */ - if (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0)) > 5) { - version = PyLong_AsLong(PyTuple_GET_ITEM(args, 0)); - } - else { - version = -1; - } + PyErr_SetString(PyExc_ValueError, + "Invalid state while unpickling. Is the pickle corrupted " + "or created with a newer NumPy version?"); + return NULL; } /* @@ -3797,6 +3793,42 @@ descr_subscript(PyArray_Descr *self, PyObject *op) } } +static PyObject * +array_typestr_get(PyArray_Descr *self) +{ + return arraydescr_protocol_typestr_get(self, NULL); +} + + +NPY_NO_EXPORT PyObject * +array_protocol_descr_get(PyArray_Descr *self) +{ + PyObject *res; + PyObject *dobj; + + res = arraydescr_protocol_descr_get(self, NULL); + if (res) { + return res; + } + PyErr_Clear(); + + /* get default */ + dobj = PyTuple_New(2); + if (dobj == NULL) { + return NULL; + } + PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString("")); + PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); + res = PyList_New(1); + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } + PyList_SET_ITEM(res, 0, dobj); + return res; +} + + static PySequenceMethods descr_as_sequence = { (lenfunc) descr_length, /* sq_length */ (binaryfunc) NULL, /* sq_concat */ diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 284afabe96fc..408d9ea0da56 
100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -20,7 +20,7 @@ PyArray_DTypeOrDescrConverterOptional(PyObject *, npy_dtype_info *dt_info); NPY_NO_EXPORT int PyArray_DTypeOrDescrConverterRequired(PyObject *, npy_dtype_info *dt_info); -NPY_NO_EXPORT int +NPY_NO_EXPORT void PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, PyArray_Descr **out_descr, PyArray_DTypeMeta **out_DType); @@ -29,6 +29,8 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_typestr_get( NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( PyArray_Descr *self, void *); +NPY_NO_EXPORT PyObject *array_protocol_descr_get(PyArray_Descr *self); + /* * offset: A starting offset. * alignment: A power-of-two alignment. diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 62cd137daa7c..fc870b90ddfb 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -392,7 +392,8 @@ device_converter(PyObject *obj, DLDevice *result_device) return NPY_SUCCEED; } - PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + /* Must be a BufferError */ + PyErr_SetString(PyExc_BufferError, "unsupported device requested"); return NPY_FAIL; } @@ -413,11 +414,10 @@ array_dlpack(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, - "$stream", NULL, &stream, - "$max_version", NULL, &max_version, - "$dl_device", &device_converter, &result_device, - "$copy", &PyArray_CopyConverter, ©_mode, - NULL, NULL, NULL)) { + {"$stream", NULL, &stream}, + {"$max_version", NULL, &max_version}, + {"$dl_device", &device_converter, &result_device}, + {"$copy", &PyArray_CopyConverter, ©_mode})) { return NULL; } @@ -492,10 +492,9 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj, *copy = Py_None, *device = Py_None; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("from_dlpack", args, len_args, kwnames, - "obj", NULL, &obj, - "$copy", NULL, ©, - 
"$device", NULL, &device, - NULL, NULL, NULL) < 0) { + {"obj", NULL, &obj}, + {"$copy", NULL, ©}, + {"$device", NULL, &device}) < 0) { return NULL; } @@ -578,7 +577,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), const int ndim = dl_tensor.ndim; if (ndim > NPY_MAXDIMS) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "maxdims of DLPack tensor is higher than the supported " "maxdims."); Py_DECREF(capsule); @@ -590,14 +589,14 @@ from_dlpack(PyObject *NPY_UNUSED(self), device_type != kDLCUDAHost && device_type != kDLROCMHost && device_type != kDLCUDAManaged) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "Unsupported device in DLTensor."); Py_DECREF(capsule); return NULL; } if (dl_tensor.dtype.lanes != 1) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "Unsupported lanes in DLTensor dtype."); Py_DECREF(capsule); return NULL; @@ -648,7 +647,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), } if (typenum == -1) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "Unsupported dtype in DLTensor."); Py_DECREF(capsule); return NULL; diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index dbad10842aff..daea361cefeb 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -169,6 +169,9 @@ _any_to_object_auxdata_clone(NpyAuxData *auxdata) _any_to_object_auxdata *data = (_any_to_object_auxdata *)auxdata; _any_to_object_auxdata *res = PyMem_Malloc(sizeof(_any_to_object_auxdata)); + if (res == NULL) { + return NULL; + } res->base = data->base; res->getitem = data->getitem; @@ -348,6 +351,7 @@ object_to_any_get_loop( /* NOTE: auxdata is only really necessary to flag `move_references` */ _object_to_any_auxdata *data = PyMem_Malloc(sizeof(*data)); if (data == NULL) { + PyErr_NoMemory(); return -1; } data->base.free = &_object_to_any_auxdata_free; @@ -821,14 +825,8 @@ 
_strided_to_strided_datetime_cast( while (N > 0) { memcpy(&dt, src, sizeof(dt)); - if (dt != NPY_DATETIME_NAT) { - /* Apply the scaling */ - if (dt < 0) { - dt = (dt * num - (denom - 1)) / denom; - } - else { - dt = dt * num / denom; - } + if (_datetime_scale_with_overflow_check(&dt, num, denom, "datetime64") < 0) { + return -1; } memcpy(dst, &dt, sizeof(dt)); @@ -857,14 +855,8 @@ _aligned_strided_to_strided_datetime_cast( while (N > 0) { dt = *(npy_int64 *)src; - if (dt != NPY_DATETIME_NAT) { - /* Apply the scaling */ - if (dt < 0) { - dt = (dt * num - (denom - 1)) / denom; - } - else { - dt = dt * num / denom; - } + if (_datetime_scale_with_overflow_check(&dt, num, denom, "datetime64") < 0) { + return -1; } *(npy_int64 *)dst = dt; @@ -1588,6 +1580,7 @@ static NpyAuxData *_n_to_n_data_clone(NpyAuxData *data) if (NPY_cast_info_copy(&newdata->wrapped, &d->wrapped) < 0) { _n_to_n_data_free((NpyAuxData *)newdata); + return NULL; } return (NpyAuxData *)newdata; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index bada1addd9cc..7320c5c9fa9a 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -201,6 +201,7 @@ dtypemeta_initialize_struct_from_spec( DType->flags = spec->flags; DType->dt_slots = PyMem_Calloc(1, sizeof(NPY_DType_Slots)); if (DType->dt_slots == NULL) { + PyErr_NoMemory(); return -1; } @@ -573,15 +574,10 @@ discover_datetime_and_timedelta_from_pyobject( PyArray_DTypeMeta *cls, PyObject *obj) { if (PyArray_IsScalar(obj, Datetime) || PyArray_IsScalar(obj, Timedelta)) { - PyArray_DatetimeMetaData *meta; - PyArray_Descr *descr = PyArray_DescrFromScalar(obj); - meta = get_datetime_metadata_from_dtype(descr); - if (meta == NULL) { - return NULL; - } - PyArray_Descr *new_descr = create_datetime_dtype(cls->type_num, meta); - Py_DECREF(descr); - return new_descr; + /* Extract metadata directly from the scalar object. 
*/ + PyArray_DatetimeMetaData *meta = + &((PyDatetimeScalarObject *)obj)->obmeta; + return create_datetime_dtype(cls->type_num, meta); } else { return find_object_datetime_type(obj, cls->type_num); @@ -1126,6 +1122,7 @@ dtypemeta_wrap_legacy_descriptor( NPY_DType_Slots *dt_slots = PyMem_Malloc(sizeof(NPY_DType_Slots)); if (dt_slots == NULL) { + PyErr_NoMemory(); return NULL; } memset(dt_slots, '\0', sizeof(NPY_DType_Slots)); @@ -1134,6 +1131,7 @@ dtypemeta_wrap_legacy_descriptor( PyArray_DTypeMeta *dtype_class = PyMem_Malloc(sizeof(PyArray_DTypeMeta)); if (dtype_class == NULL) { PyMem_Free(dt_slots); + PyErr_NoMemory(); return NULL; } @@ -1142,7 +1140,7 @@ dtypemeta_wrap_legacy_descriptor( * a prototype instances for everything except our own fields which * vary between the DTypes. * In particular any Object initialization must be strictly copied from - * the untouched prototype to avoid complexities (e.g. with PyPy). + * the untouched prototype to avoid complexities. * Any Type slots need to be fixed before PyType_Ready, although most * will be inherited automatically there. 
*/ diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index bf0acb48b899..c59772203a85 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -96,6 +96,9 @@ typedef struct { */ PyArrayMethodObject *sort_meth; PyArrayMethodObject *argsort_meth; + /* Definition for real and imaginary parts, and the (internal) ufuncs */ + PyBoundArrayMethodObject *real_meth; + PyBoundArrayMethodObject *imag_meth; } NPY_DType_Slots; // This must be updated if new slots before within_dtype_castingimpl @@ -287,7 +290,7 @@ PyDataType_GetArrFuncs(const PyArray_Descr *descr) static inline PyObject * PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem( + return PyDataType_GetArrFuncs(PyArray_DESCR(arr))->getitem( (void *)itemptr, (PyArrayObject *)arr); } diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c index 2b7848519e61..cef99f93d455 100644 --- a/numpy/_core/src/multiarray/fnv.c +++ b/numpy/_core/src/multiarray/fnv.c @@ -72,14 +72,15 @@ npy_fnv1a_64(const void *buf, size_t len, npy_uint64 hval) /* * Compute a size_t FNV-1a hash of the given data - * This will use 32-bit or 64-bit hash depending on the size of size_t + * This will use 32-bit or 64-bit hash depending on the size of npy_uintp. + * npy_uintp has the same size as size_t. 
*/ size_t npy_fnv1a(const void *buf, size_t len) { -#if NPY_SIZEOF_SIZE_T == 8 +#if NPY_SIZEOF_UINTP == 8 return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); -#else /* NPY_SIZEOF_SIZE_T == 4 */ +#else /* NPY_SIZEOF_UINTP == 4 */ return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); #endif } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 1aff38476d50..721d7a73738c 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -22,6 +22,7 @@ #include "getset.h" #include "arrayobject.h" #include "mem_overlap.h" +#include "number.h" #include "alloc.h" #include "npy_buffer.h" #include "shape.h" @@ -49,17 +50,13 @@ array_shape_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) } -static int -array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) +NPY_NO_EXPORT int +array_shape_set_internal(PyArrayObject *self, PyObject *val) { int nd; PyArrayObject *ret; + assert(val); - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array shape"); - return -1; - } /* Assumes C-order */ ret = (PyArrayObject *)PyArray_Reshape(self, val); if (ret == NULL) { @@ -106,6 +103,25 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) return 0; } +static int +array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) +{ + if (val == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array shape"); + return -1; + } + + /* Deprecated NumPy 2.5, 2026-01-05 */ + if (DEPRECATE("Setting the shape on a NumPy array has been deprecated" + " in NumPy 2.5.\nAs an alternative, you can create a new" + " view using np.reshape (with copy=False if needed)." 
+ ) < 0 ) { + return -1; + } + + return array_shape_set_internal(self, val); +} static PyObject * array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) @@ -200,12 +216,6 @@ array_priority_get(PyArrayObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) return PyFloat_FromDouble(NPY_PRIORITY); } -static PyObject * -array_typestr_get(PyArrayObject *self) -{ - return arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); -} - static PyObject * array_descr_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { @@ -213,34 +223,6 @@ array_descr_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return (PyObject *)PyArray_DESCR(self); } -static PyObject * -array_protocol_descr_get(PyArrayObject *self) -{ - PyObject *res; - PyObject *dobj; - - res = arraydescr_protocol_descr_get(PyArray_DESCR(self), NULL); - if (res) { - return res; - } - PyErr_Clear(); - - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) { - return NULL; - } - PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString("")); - PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) { - Py_DECREF(dobj); - return NULL; - } - PyList_SET_ITEM(res, 0, dobj); - return res; -} - static PyObject * array_protocol_strides_get(PyArrayObject *self) { @@ -280,65 +262,49 @@ array_ctypes_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) static PyObject * array_interface_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyObject *dict; - PyObject *obj; - - dict = PyDict_New(); - if (dict == NULL) { - return NULL; - } + PyObject *dataptr = NULL; + PyObject *strides = NULL; + PyObject *shape = NULL; + PyObject *descr = NULL; + PyObject *typestr = NULL; + PyObject *dict = NULL; - int ret; - - /* dataptr */ - obj = array_dataptr_get(self, NULL); - ret = PyDict_SetItemString(dict, "data", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + dataptr = array_dataptr_get(self, NULL); + if (dataptr == NULL) { + goto finish; } - obj = 
array_protocol_strides_get(self); - ret = PyDict_SetItemString(dict, "strides", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + strides = array_protocol_strides_get(self); + if (strides == NULL) { + goto finish; } - obj = array_protocol_descr_get(self); - ret = PyDict_SetItemString(dict, "descr", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + descr = array_protocol_descr_get(PyArray_DESCR(self)); + if (descr == NULL) { + goto finish; } - obj = arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); - ret = PyDict_SetItemString(dict, "typestr", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + typestr = arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); + if (typestr == NULL) { + goto finish; } - obj = array_shape_get(self, NULL); - ret = PyDict_SetItemString(dict, "shape", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + shape = array_shape_get(self, NULL); + if (shape == NULL) { + goto finish; } - obj = PyLong_FromLong(3); - ret = PyDict_SetItemString(dict, "version", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; - } + dict = build_array_interface( + dataptr, descr, strides, typestr, shape + ); + goto finish; +finish: + Py_XDECREF(dataptr); + Py_XDECREF(strides); + Py_XDECREF(shape); + Py_XDECREF(descr); + Py_XDECREF(typestr); return dict; } @@ -375,16 +341,11 @@ array_nbytes_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) * (contiguous or fortran) with compatible dimensions The shape and strides * will be adjusted in that case as well. 
*/ -static int -array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) +NPY_NO_EXPORT int +array_descr_set_internal(PyArrayObject *self, PyObject *arg) { PyArray_Descr *newtype = NULL; - - if (arg == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array dtype"); - return -1; - } + assert(arg); if (!(PyArray_DescrConverter(arg, &newtype)) || newtype == NULL) { @@ -530,6 +491,25 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) return -1; } +static int +array_descr_set(PyArrayObject *self, PyObject *arg) +{ + if (arg == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array dtype"); + return -1; + } + + /* DEPRECATED 2026-02-06, NumPy 2.5 */ + if (DEPRECATE( + "Setting the dtype on a NumPy array has been deprecated in NumPy 2.5.\n" + "Instead of changing the dtype on an array x, create a new array " + "with x.view(new_dtype)") < 0) { + return -1; + } + return array_descr_set_internal(self, arg); +} + static PyObject * array_struct_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { @@ -608,132 +588,109 @@ array_base_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) } } + /* - * Create a view of a complex array with an equivalent data-type - * except it is real instead of complex. + * Fetches the real or imaginary part of an array. If `need_view` is set the return + * cannot be a copy (must be a view). + * If `need_view` is zero it will return the `ufunc`s result (a new array) and set it + * to read-only. 
*/ -static PyArrayObject * -_get_part(PyArrayObject *self, int imag) +static PyObject * +_get_part(PyArrayObject *self, PyObject *ufunc, PyBoundArrayMethodObject *meth, int need_view) { - int float_type_num; - PyArray_Descr *type; - PyArrayObject *ret; - int offset; - - switch (PyArray_DESCR(self)->type_num) { - case NPY_CFLOAT: - float_type_num = NPY_FLOAT; - break; - case NPY_CDOUBLE: - float_type_num = NPY_DOUBLE; - break; - case NPY_CLONGDOUBLE: - float_type_num = NPY_LONGDOUBLE; - break; - default: - PyErr_Format(PyExc_ValueError, - "Cannot convert complex type number %d to float", - PyArray_DESCR(self)->type_num); - return NULL; - - } - type = PyArray_DescrFromType(float_type_num); - if (type == NULL) { + PyObject *ret = NULL; + PyArray_Descr *descrs[2] = {PyArray_DESCR(self), NULL}; + PyArray_Descr *loop_descrs[2] = {NULL, NULL}; + npy_intp view_offset = NPY_MIN_INTP; + int res = meth->method->resolve_descriptors( + meth->method, meth->dtypes, descrs, loop_descrs, &view_offset); + if (res < 0) { return NULL; } - - offset = (imag ? type->elsize : 0); - - if (!PyArray_ISNBO(PyArray_DESCR(self)->byteorder)) { - Py_SETREF(type, PyArray_DescrNew(type)); - if (type == NULL) { - return NULL; + if (view_offset != NPY_MIN_INTP) { + Py_INCREF(loop_descrs[1]); + ret = PyArray_NewFromDescr_int( + Py_TYPE(self), loop_descrs[1], + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_BYTES(self) + view_offset, + PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); + } + else if (!need_view) { + // resolve_descriptors was successful, but view_offset is not set so we call + // the ufunc to let it deal with the (potential) complexity. + ret = PyArray_GenericUnaryFunction(self, ufunc); + if (ret != NULL && PyArray_Check(ret)) { + // Make result read-only, since otherwise `arr.imag[...] = val` + // would for example work. 
+ PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); } - type->byteorder = PyArray_DESCR(self)->byteorder; - } - ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( - Py_TYPE(self), - type, - PyArray_NDIM(self), - PyArray_DIMS(self), - PyArray_STRIDES(self), - PyArray_BYTES(self) + offset, - PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self); - if (ret == NULL) { - return NULL; } + + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return ret; } -/* For Object arrays, we need to get and set the - real part of each element. - */ static PyObject * array_real_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *ret; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->real_meth; - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - return (PyObject *)ret; - } - else { - Py_INCREF(self); - return (PyObject *)self; + if (meth == NULL) { + // If the method is not set, we just assume this can be seen as "real" + // it really may be nice to change that one day. + return Py_NewRef((PyObject *)self); } -} + return _get_part(self, n_ops.real, meth, /* need_view */ 0); +} static int array_real_set(PyArrayObject *self, PyObject *val, void *NPY_UNUSED(ignored)) { - PyArrayObject *ret; - PyArrayObject *new; - int retcode; - if (val == NULL) { PyErr_SetString(PyExc_AttributeError, "Cannot delete array real part"); return -1; } - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - if (ret == NULL) { - return -1; - } - } - else { + + PyArrayObject *part; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->real_meth; + + if (meth == NULL) { + // See above, we may want to not guess this always... 
Py_INCREF(self); - ret = self; + part = self; } - new = (PyArrayObject *)PyArray_FROM_O(val); - if (new == NULL) { - Py_DECREF(ret); - return -1; + else { + part = (PyArrayObject *)_get_part( + self, n_ops.real, meth, /* need_view */ 1); + if (part == NULL) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "Cannot set real part when `.real` isn't a view."); + } + return -1; + } } - retcode = PyArray_CopyInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return retcode; + + int ret = PyArray_CopyObject(part, val); + Py_DECREF(part); + return ret; } -/* For Object arrays we need to get - and set the imaginary part of - each element -*/ static PyObject * array_imag_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *ret; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->imag_meth; - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 1); - } - else { + if (meth == NULL) { + // We assume this is a real type, so return a zeroed array. 
Py_INCREF(PyArray_DESCR(self)); - ret = (PyArrayObject *)PyArray_NewFromDescr_int( + PyObject *ret = PyArray_NewFromDescr_int( Py_TYPE(self), PyArray_DESCR(self), PyArray_NDIM(self), @@ -744,9 +701,11 @@ array_imag_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) if (ret == NULL) { return NULL; } - PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); + return ret; } - return (PyObject *) ret; + + return _get_part(self, n_ops.imag, meth, /* need_view */ 0); } static int @@ -757,30 +716,28 @@ array_imag_set(PyArrayObject *self, PyObject *val, void *NPY_UNUSED(ignored)) "Cannot delete array imaginary part"); return -1; } - if (PyArray_ISCOMPLEX(self)) { - PyArrayObject *ret; - PyArrayObject *new; - int retcode; + PyArrayObject *part; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->imag_meth; - ret = _get_part(self, 1); - if (ret == NULL) { - return -1; - } - new = (PyArrayObject *)PyArray_FROM_O(val); - if (new == NULL) { - Py_DECREF(ret); - return -1; - } - retcode = PyArray_CopyInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return retcode; - } - else { + if (meth == NULL) { PyErr_SetString(PyExc_TypeError, - "array does not have imaginary part to set"); + "Cannot set imaginary part of non-complex array."); return -1; } + + part = (PyArrayObject *)_get_part( + self, n_ops.imag, meth, /* need_view */ 1); + if (part == NULL) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "Cannot set imaginary part when `.imag` isn't a view."); + } + return -1; + } + + int ret = PyArray_CopyObject(part, val); + Py_DECREF(part); + return ret; } static PyObject * diff --git a/numpy/_core/src/multiarray/getset.h b/numpy/_core/src/multiarray/getset.h index a95c98020a18..a9ae39481837 100644 --- a/numpy/_core/src/multiarray/getset.h +++ b/numpy/_core/src/multiarray/getset.h @@ -3,4 +3,7 @@ extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; +NPY_NO_EXPORT int 
array_descr_set_internal(PyArrayObject *self, PyObject *arg); +NPY_NO_EXPORT int array_shape_set_internal(PyArrayObject *self, PyObject *val); + #endif /* NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ */ diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index 853e247e0b74..be203eb197c3 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -1,12 +1,13 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include +#include #include -#include "npy_atomic.h" #include "npy_config.h" @@ -303,14 +304,14 @@ PyArray_DescrHash(PyObject* odescr) } descr = (PyArray_Descr*)odescr; - hash = npy_atomic_load_hash_t(&descr->hash); + hash = atomic_load_explicit((_Atomic(npy_hash_t) *)&descr->hash, memory_order_relaxed); if (hash == -1) { hash = _PyArray_DescrHashImp(descr); if (hash == -1) { return -1; } - npy_atomic_store_hash_t(&descr->hash, hash); + atomic_store_explicit((_Atomic(npy_hash_t) *)&descr->hash, hash, memory_order_relaxed); } return hash; diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index f9d753f9e7ba..454a434304bb 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -232,7 +232,7 @@ NPY_NO_EXPORT PyObject * PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *out, NPY_CLIPMODE clipmode) { - PyArray_Descr *dtype; + PyArray_Descr *dtype, *out_dtype; PyArrayObject *obj = NULL, *self, *indices; npy_intp nd, i, n, m, max_item, chunk, itemsize, nelem; npy_intp shape[NPY_MAXDIMS]; @@ -310,6 +310,19 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, flags |= NPY_ARRAY_ENSURECOPY; } dtype = PyArray_DESCR(self); + out_dtype = PyArray_DESCR(out); + if (dtype != out_dtype) { + /* Deprecated NumPy 2.5, 2026-01 */ + if (!PyArray_CanCastTypeTo(dtype, out_dtype, NPY_SAME_KIND_CASTING)) { + if 
(DEPRECATE( + "Implicit casting of output to a different kind is deprecated. " + "In a future version, this will result in an error. (Deprecated NumPy 2.5)") < + 0) { + goto fail; + } + } + flags |= NPY_ARRAY_FORCECAST; + } Py_INCREF(dtype); obj = (PyArrayObject *)PyArray_FromArray(out, dtype, flags); if (obj == NULL) { @@ -1371,7 +1384,6 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (needcopy) { PyArray_ClearBuffer(odescr, buffer, elsize, N, 1); PyDataMem_UserFREE(buffer, N * elsize, mem_handler); - Py_DECREF(odescr); } if (ret < 0 && !PyErr_Occurred()) { /* Out of memory during sorting or buffer creation */ @@ -1384,6 +1396,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (PyErr_Occurred() && ret == 0) { ret = -1; } + Py_XDECREF(odescr); Py_DECREF(it); Py_DECREF(mem_handler); NPY_cast_info_xfree(&to_cast_info); @@ -1594,7 +1607,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, if (needcopy) { PyArray_ClearBuffer(odescr, valbuffer, elsize, N, 1); PyDataMem_UserFREE(valbuffer, N * elsize, mem_handler); - Py_DECREF(odescr); } PyDataMem_UserFREE(idxbuffer, N * sizeof(npy_intp), mem_handler); if (ret < 0) { @@ -1605,6 +1617,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, Py_XDECREF(rop); rop = NULL; } + Py_XDECREF(odescr); Py_XDECREF(it); Py_XDECREF(rit); Py_DECREF(mem_handler); @@ -2407,7 +2420,7 @@ count_nonzero_bytes_384(const npy_uint64 * w) */ if (NPY_UNLIKELY( ((w1 | w2 | w3 | w4 | w5 | w6) & 0xFEFEFEFEFEFEFEFEULL) != 0)) { - /* reload from pointer to avoid a unnecessary stack spill with gcc */ + /* reload from pointer to avoid an unnecessary stack spill with gcc */ const char * c = (const char *)w; npy_uintp i, count = 0; for (i = 0; i < 48; i++) { @@ -3170,7 +3183,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArray_Descr *given_descrs[2] = {descr, descr}; // Sort cannot be a view, so view_offset is unused npy_intp 
view_offset = 0; - + if (sort_method->resolve_descriptors( sort_method, dtypes, given_descrs, loop_descrs, &view_offset) < 0) { PyErr_SetString(PyExc_RuntimeError, @@ -3179,6 +3192,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } context.descriptors = loop_descrs; context.parameters = &sort_params; + context.method = sort_method; // Arrays are always contiguous for sorting npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; @@ -3279,7 +3293,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArray_Descr *given_descrs[2] = {descr, odescr}; // we can ignore the view_offset for sorting npy_intp view_offset = 0; - + int resolve_ret = argsort_method->resolve_descriptors( argsort_method, dtypes, given_descrs, loop_descrs, &view_offset); Py_DECREF(odescr); @@ -3290,6 +3304,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } context.descriptors = loop_descrs; context.parameters = &sort_params; + context.method = argsort_method; // Arrays are always contiguous for sorting npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 1b4ed59fbfe0..589961f51615 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -129,11 +129,11 @@ PyArray_IterNew(PyObject *obj) } it = (PyArrayIterObject *)PyArray_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ if (it == NULL) { + PyErr_NoMemory(); return NULL; } + PyObject_Init((PyObject *)it, &PyArrayIter_Type); Py_INCREF(ao); /* PyArray_RawIterBaseInit steals a reference */ PyArray_RawIterBaseInit(it, ao); @@ -764,6 +764,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) npy_index_info indices[NPY_MAXDIMS * 2 + 1]; PyArray_Descr *dtype = PyArray_DESCR(self->ao); + PyArrayObject 
*arrval = NULL; + PyArrayIterObject *val_it = NULL; npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; @@ -830,12 +832,12 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } Py_INCREF(dtype); - PyArrayObject *arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, + arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { goto finish; } - PyArrayIterObject *val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); + val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } @@ -908,6 +910,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) for (int i = 0; i < index_num; i++) { Py_XDECREF(indices[i].object); } + Py_XDECREF(val_it); + Py_XDECREF(arrval); return ret; } @@ -1350,7 +1354,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, if (fast_seq == NULL) { return NULL; } - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args) + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args); n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { ret = multiiter_wrong_number_of_args(); @@ -1358,7 +1362,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } Py_DECREF(fast_seq); - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); return ret; } diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index d6128f74621a..07f08765f875 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -270,7 +270,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, npy_intp n; PyObject *obj = NULL; - PyArrayObject *arr; + PyArrayObject *arr = NULL; // free'd on error use Py_CLEAR to decref. 
int index_type = 0; int ellipsis_pos = -1; @@ -489,6 +489,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, index_type = HAS_BOOL; indices[curr_idx].type = HAS_BOOL; indices[curr_idx].object = (PyObject *)arr; + arr = NULL; // Reference moved, clean up for error path. /* keep track anyway, just to be complete */ used_ndim = array_ndims; @@ -523,7 +524,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, indices[curr_idx].value = n; indices[curr_idx].object = PyArray_Zeros(1, &n, PyArray_DescrFromType(NPY_INTP), 0); - Py_DECREF(arr); + Py_CLEAR(arr); if (indices[curr_idx].object == NULL) { goto failed_building_indices; @@ -541,7 +542,6 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, n = _nonzero_indices((PyObject *)arr, nonzero_result); if (n < 0) { - Py_DECREF(arr); goto failed_building_indices; } @@ -552,7 +552,6 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } - Py_DECREF(arr); goto failed_building_indices; } @@ -566,7 +565,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, used_ndim += 1; curr_idx += 1; } - Py_DECREF(arr); + Py_CLEAR(arr); /* All added indices have 1 dimension */ if (fancy_ndim < 1) { @@ -587,7 +586,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, */ npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr); - Py_DECREF(arr); + Py_CLEAR(arr); if (error_converting(ind)) { goto failed_building_indices; } @@ -603,15 +602,17 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, } } + if (fancy_ndim < PyArray_NDIM(arr)) { + fancy_ndim = PyArray_NDIM(arr); + } + index_type |= HAS_FANCY; indices[curr_idx].type = HAS_FANCY; indices[curr_idx].value = -1; indices[curr_idx].object = (PyObject *)arr; + arr = NULL; // Reference moved, clean up for error path. 
used_ndim += 1; - if (fancy_ndim < PyArray_NDIM(arr)) { - fancy_ndim = PyArray_NDIM(arr); - } curr_idx += 1; continue; } @@ -632,7 +633,6 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, is_flatiter_object ? "" : ", numpy.newaxis (`None`)" ); } - Py_DECREF(arr); goto failed_building_indices; } @@ -760,6 +760,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, return index_type; failed_building_indices: + Py_XDECREF(arr); for (i=0; i < curr_idx; i++) { Py_XDECREF(indices[i].object); } @@ -2754,7 +2755,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) * * @param Index information filled by prepare_index. * @param Number of indices (gotten through prepare_index). - * @param Kind of index (gotten through preprare_index). + * @param Kind of index (gotten through prepare_index). * @param NpyIter flags for an extra array. If 0 assume that there is no * extra operand. NPY_ITER_ALLOCATE can make sense here. * @param Array being indexed diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 6dcc349dcd03..ce21261648c5 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -25,6 +25,7 @@ #include "dtypemeta.h" #include "item_selection.h" #include "conversion_utils.h" +#include "getset.h" #include "shape.h" #include "strfuncs.h" #include "array_assign.h" @@ -125,11 +126,10 @@ array_take(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("take", args, len_args, kwnames, - "indices", NULL, &indices, - "|axis", &PyArray_AxisConverter, &dimension, - "|out", &PyArray_OutputConverter, &out, - "|mode", &PyArray_ClipmodeConverter, &mode, - NULL, NULL, NULL) < 0) { + {"indices", NULL, &indices}, + {"|axis", &PyArray_AxisConverter, &dimension}, + {"|out", &PyArray_OutputConverter, &out}, + {"|mode", &PyArray_ClipmodeConverter, &mode}) < 0) { return NULL; } @@ -224,8 +224,7 @@ array_squeeze(PyArrayObject *self, 
NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("squeeze", args, len_args, kwnames, - "|axis", NULL, &axis_in, - NULL, NULL, NULL) < 0) { + {"|axis", NULL, &axis_in}) < 0) { return NULL; } @@ -252,9 +251,8 @@ array_view(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("view", args, len_args, kwnames, - "|dtype", NULL, &out_dtype, - "|type", NULL, &out_type, - NULL, NULL, NULL) < 0) { + {"|dtype", NULL, &out_dtype}, + {"|type", NULL, &out_type}) < 0) { return NULL; } @@ -301,10 +299,9 @@ array_argmax(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argmax", args, len_args, kwnames, - "|axis", &PyArray_AxisConverter, &axis, - "|out", &PyArray_OutputConverter, &out, - "$keepdims", &PyArray_BoolConverter, &keepdims, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_AxisConverter, &axis}, + {"|out", &PyArray_OutputConverter, &out}, + {"$keepdims", &PyArray_BoolConverter, &keepdims}) < 0) { return NULL; } @@ -328,10 +325,9 @@ array_argmin(PyArrayObject *self, npy_bool keepdims = NPY_FALSE; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argmin", args, len_args, kwnames, - "|axis", &PyArray_AxisConverter, &axis, - "|out", &PyArray_OutputConverter, &out, - "$keepdims", &PyArray_BoolConverter, &keepdims, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_AxisConverter, &axis}, + {"|out", &PyArray_OutputConverter, &out}, + {"$keepdims", &PyArray_BoolConverter, &keepdims}) < 0) { return NULL; } @@ -772,12 +768,11 @@ array_astype(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("astype", args, len_args, kwnames, - "dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "|casting", &PyArray_CastingConverterSameValue, &casting, - "|subok", &PyArray_PythonPyIntFromInt, &subok, - "|copy", &PyArray_AsTypeCopyConverter, &forcecopy, - NULL, NULL, NULL) < 0) { + {"dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"|casting", 
&PyArray_CastingConverterSameValue, &casting}, + {"|subok", &PyArray_PythonPyIntFromInt, &subok}, + {"|copy", &PyArray_AsTypeCopyConverter, &forcecopy}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1019,7 +1014,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) return -1; } for (i = 0; i < nin; ++i) { -#if defined(PYPY_VERSION) || defined(Py_LIMITED_API) +#if defined(Py_LIMITED_API) PyObject *obj = PyTuple_GetItem(args, i); if (obj == NULL) { return -1; @@ -1150,8 +1145,7 @@ array_copy(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("copy", args, len_args, kwnames, - "|order", PyArray_OrderConverter, &order, - NULL, NULL, NULL) < 0) { + {"|order", PyArray_OrderConverter, &order}) < 0) { return NULL; } @@ -1270,12 +1264,12 @@ array_sort(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, - "|axis", &PyArray_PythonPyIntFromInt, &axis, - "|kind", &PyArray_SortkindConverter, &sortkind, - "|order", NULL, &order, - "$stable", &PyArray_OptionalBoolConverter, &stable, -// "$descending", &PyArray_OptionalBoolConverter, &descending, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_PythonPyIntFromInt, &axis}, + {"|kind", &PyArray_SortkindConverter, &sortkind}, + {"|order", NULL, &order}, + {"$stable", &PyArray_OptionalBoolConverter, &stable} + // {"$descending", &PyArray_OptionalBoolConverter, &descending} + ) < 0) { return NULL; } @@ -1358,11 +1352,10 @@ array_partition(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("partition", args, len_args, kwnames, - "kth", NULL, &kthobj, - "|axis", &PyArray_PythonPyIntFromInt, &axis, - "|kind", &PyArray_SelectkindConverter, &sortkind, - "|order", NULL, &order, - NULL, NULL, NULL) < 0) { + {"kth", NULL, &kthobj}, + {"|axis", &PyArray_PythonPyIntFromInt, &axis}, + {"|kind", &PyArray_SelectkindConverter, &sortkind}, + {"|order", NULL, &order}) < 0) { return NULL; } @@ -1432,12 +1425,13 @@ 
array_argsort(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, - "|axis", &PyArray_AxisConverter, &axis, - "|kind", &PyArray_SortkindConverter, &sortkind, - "|order", NULL, &order, - "$stable", &PyArray_OptionalBoolConverter, &stable, -// "$descending", &PyArray_OptionalBoolConverter, &descending, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_AxisConverter, &axis}, + {"|kind", &PyArray_SortkindConverter, &sortkind}, + {"|order", NULL, &order}, + {"$stable", &PyArray_OptionalBoolConverter, &stable} + // TODO: add descending sorts, gh-14728 + // {"$descending", &PyArray_OptionalBoolConverter, &descending} + ) < 0) { return NULL; } @@ -1514,11 +1508,10 @@ array_argpartition(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argpartition", args, len_args, kwnames, - "kth", NULL, &kthobj, - "|axis", &PyArray_AxisConverter, &axis, - "|kind", &PyArray_SelectkindConverter, &sortkind, - "|order", NULL, &order, - NULL, NULL, NULL) < 0) { + {"kth", NULL, &kthobj}, + {"|axis", &PyArray_AxisConverter, &axis}, + {"|kind", &PyArray_SelectkindConverter, &sortkind}, + {"|order", NULL, &order}) < 0) { return NULL; } if (order == Py_None) { @@ -1579,10 +1572,9 @@ array_searchsorted(PyArrayObject *self, sorter = NULL; if (npy_parse_arguments("searchsorted", args, len_args, kwnames, - "v", NULL, &keys, - "|side", &PyArray_SearchsideConverter, &side, - "|sorter", NULL, &sorter, - NULL, NULL, NULL) < 0) { + {"v", NULL, &keys}, + {"|side", &PyArray_SearchsideConverter, &side}, + {"|sorter", NULL, &sorter}) < 0) { return NULL; } if (sorter == Py_None) { @@ -2072,17 +2064,6 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } - /* - * Reassigning fa->descr messes with the reallocation strategy, - * since fa could be a 0-d or scalar, and then - * PyDataMem_UserFREE will be confused - */ - size_t n_tofree = PyArray_NBYTES(self); - if (n_tofree == 0) { - n_tofree = 1; - } - 
Py_XDECREF(PyArray_DESCR(self)); - fa->descr = typecode; Py_INCREF(typecode); nd = PyArray_IntpFromSequence(shape, dimensions, NPY_MAXDIMS); if (nd < 0) { @@ -2096,31 +2077,19 @@ array_setstate(PyArrayObject *self, PyObject *args) * copy from the pickled data (may not match allocation currently if 0). * Compare with `PyArray_NewFromDescr`, raise MemoryError for simplicity. */ - npy_bool empty = NPY_FALSE; - nbytes = 1; + nbytes = typecode->elsize; for (int i = 0; i < nd; i++) { if (dimensions[i] < 0) { PyErr_SetString(PyExc_TypeError, "impossible dimension while unpickling array"); return NULL; } - if (dimensions[i] == 0) { - empty = NPY_TRUE; - } overflowed = npy_mul_sizes_with_overflow( &nbytes, nbytes, dimensions[i]); if (overflowed) { return PyErr_NoMemory(); } } - overflowed = npy_mul_sizes_with_overflow( - &nbytes, nbytes, PyArray_ITEMSIZE(self)); - if (overflowed) { - return PyErr_NoMemory(); - } - if (empty) { - nbytes = 0; - } if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { if (!PyList_Check(rawdata)) { @@ -2134,11 +2103,8 @@ array_setstate(PyArrayObject *self, PyObject *args) /* Backward compatibility with Python 2 NumPy pickles */ if (PyUnicode_Check(rawdata)) { - PyObject *tmp; - tmp = PyUnicode_AsLatin1String(rawdata); - Py_DECREF(rawdata); - rawdata = tmp; - if (tmp == NULL) { + Py_SETREF(rawdata, PyUnicode_AsLatin1String(rawdata)); + if (rawdata == NULL) { /* More informative error message */ PyErr_SetString(PyExc_ValueError, ("Failed to encode latin1 string when unpickling a Numpy array. 
" @@ -2166,32 +2132,13 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } } - - if ((PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { - /* - * Allocation will never be 0, see comment in ctors.c - * line 820 - */ - PyObject *handler = PyArray_HANDLER(self); - if (handler == NULL) { - /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ - PyErr_SetString(PyExc_RuntimeError, - "no memory handler found but OWNDATA flag set"); - return NULL; - } - PyDataMem_UserFREE(PyArray_DATA(self), n_tofree, handler); - PyArray_CLEARFLAGS(self, NPY_ARRAY_OWNDATA); - } - Py_XDECREF(PyArray_BASE(self)); - fa->base = NULL; - - PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); - - if (PyArray_DIMS(self) != NULL) { - npy_free_cache_dim_array(self); - fa->dimensions = NULL; + /* + * Get rid of everything on self, and then populate with pickle data. + */ + if (clear_array_attributes(self) < 0) { + return NULL; } - + fa->descr = typecode; fa->flags = NPY_ARRAY_DEFAULT; fa->nd = nd; @@ -2221,11 +2168,8 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0) { num = 1; } - /* Store the handler in case the default is modified */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { - Py_CLEAR(fa->mem_handler); Py_DECREF(rawdata); return NULL; } @@ -2273,7 +2217,6 @@ array_setstate(PyArrayObject *self, PyObject *args) } else { /* The handlers should never be called in this case */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = NULL; fa->data = datastr; if (PyArray_SetBaseObject(self, rawdata) < 0) { @@ -2287,9 +2230,7 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0) { num = 1; } - /* Store the functions in case the default handler is modified */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { return NULL; @@ -2494,9 +2435,8 @@ array_dot(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dot", args, 
len_args, kwnames, - "b", NULL, &b, - "|out", NULL, &o, - NULL, NULL, NULL) < 0) { + {"b", NULL, &b}, + {"|out", NULL, &o}) < 0) { return NULL; } @@ -2592,12 +2532,11 @@ array_trace(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("trace", args, len_args, kwnames, - "|offset", &PyArray_PythonPyIntFromInt, &offset, - "|axis1", &PyArray_PythonPyIntFromInt, &axis1, - "|axis2", &PyArray_PythonPyIntFromInt, &axis2, - "|dtype", &PyArray_DescrConverter2, &dtype, - "|out", &PyArray_OutputConverter, &out, - NULL, NULL, NULL) < 0) { + {"|offset", &PyArray_PythonPyIntFromInt, &offset}, + {"|axis1", &PyArray_PythonPyIntFromInt, &axis1}, + {"|axis2", &PyArray_PythonPyIntFromInt, &axis2}, + {"|dtype", &PyArray_DescrConverter2, &dtype}, + {"|out", &PyArray_OutputConverter, &out}) < 0) { Py_XDECREF(dtype); return NULL; } @@ -2617,6 +2556,15 @@ array_trace(PyArrayObject *self, #undef _CHKTYPENUM +static PyObject* array__set_dtype(PyObject *self, PyObject *args) +{ + int r = array_descr_set_internal((PyArrayObject *)self, args); + + if (r < 0) { + return NULL; + } + Py_RETURN_NONE; +} static PyObject * array_clip(PyArrayObject *self, @@ -2666,8 +2614,7 @@ array_flatten(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("flatten", args, len_args, kwnames, - "|order", PyArray_OrderConverter, &order, - NULL, NULL, NULL) < 0) { + {"|order", PyArray_OrderConverter, &order}) < 0) { return NULL; } return PyArray_Flatten(self, order); @@ -2682,8 +2629,7 @@ array_ravel(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("ravel", args, len_args, kwnames, - "|order", PyArray_OrderConverter, &order, - NULL, NULL, NULL) < 0) { + {"|order", PyArray_OrderConverter, &order}) < 0) { return NULL; } return PyArray_Ravel(self, order); @@ -2875,6 +2821,16 @@ array_class_getitem(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } +static PyObject* array__set_shape(PyObject *self, PyObject *args) +{ + int r = 
array_shape_set_internal((PyArrayObject *)self, args); + + if (r < 0) { + return NULL; + } + Py_RETURN_NONE; +} + NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ @@ -3099,6 +3055,10 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction)array_dlpack_device, METH_NOARGS, NULL}, + // For deprecation of ndarray setters + {"_set_shape", + (PyCFunction)array__set_shape, + METH_O, NULL}, // For Array API compatibility {"__array_namespace__", (PyCFunction)array_array_namespace, @@ -3106,6 +3066,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"to_device", (PyCFunction)array_to_device, METH_VARARGS | METH_KEYWORDS, NULL}, - + // For deprecation of ndarray setters + {"_set_dtype", + (PyCFunction)array__set_dtype, + METH_O, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8bede253a22f..eb97b0ff267d 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -123,6 +123,7 @@ get_legacy_print_mode(void) { PyObject *legacy_print_mode = NULL; if (PyDict_GetItemRef(format_options, npy_interned_str.legacy, &legacy_print_mode) == -1) { + Py_DECREF(format_options); return -1; } Py_DECREF(format_options); @@ -303,6 +304,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, n = PyArray_DIMS(ap)[0]; ptr2 = (char **)PyArray_malloc(n * sizeof(char *)); if (!ptr2) { + Py_DECREF(ap); PyErr_NoMemory(); return -1; } @@ -316,6 +318,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, m = PyArray_DIMS(ap)[1]; ptr3 = (char ***)PyArray_malloc(n*(m+1) * sizeof(char *)); if (!ptr3) { + Py_DECREF(ap); PyErr_NoMemory(); return -1; } @@ -878,7 +881,6 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) { PyArrayObject *ap1 = NULL; PyArrayObject *ap2 = NULL; - int typenum; PyArray_Descr *typec = NULL; PyObject* ap2t = NULL; npy_intp dims[NPY_MAXDIMS]; @@ -886,23 +888,18 @@ 
PyArray_InnerProduct(PyObject *op1, PyObject *op2) int i; PyObject* ret = NULL; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); if (typec == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "Cannot find a common data type."); - } - goto fail; + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, @@ -911,6 +908,8 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) Py_DECREF(typec); goto fail; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { @@ -944,6 +943,7 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) Py_DECREF(ap1); Py_DECREF(ap2); Py_DECREF(ap2t); + Py_DECREF(typec); return ret; fail: @@ -951,6 +951,7 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) Py_XDECREF(ap2); Py_XDECREF(ap2t); Py_XDECREF(ret); + Py_XDECREF(typec); return NULL; } @@ -974,7 +975,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) PyArrayObject *ap1, *ap2, *out_buf = NULL, *result = NULL; PyArrayIterObject *it1, *it2; npy_intp i, j, l; - int typenum, nd, axis, matchDim; + int nd, axis, matchDim; npy_intp is1, is2, os; char *op; npy_intp dimensions[NPY_MAXDIMS]; @@ -982,23 +983,18 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) PyArray_Descr *typec = NULL; NPY_BEGIN_THREADS_DEF; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, 
typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); if (typec == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "Cannot find a common data type."); - } - return NULL; + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, @@ -1007,6 +1003,8 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) Py_DECREF(typec); return NULL; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { @@ -1016,9 +1014,9 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) #if defined(HAVE_CBLAS) if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 && - (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum || - NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) { - return cblas_matrixproduct(typenum, ap1, ap2, out); + (NPY_DOUBLE == typec->type_num || NPY_CDOUBLE == typec->type_num || + NPY_FLOAT == typec->type_num || NPY_CFLOAT == typec->type_num)) { + return cblas_matrixproduct(typec, ap1, ap2, out); } #endif @@ -1059,7 +1057,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1)-1]; is2 = PyArray_STRIDES(ap2)[matchDim]; /* Choose which subtype to return */ - out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum, &result); + out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typec, &result); if (out_buf == NULL) { goto fail; } @@ -1121,6 +1119,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) /* Trigger possible copy-back into `result` */ PyArray_ResolveWritebackIfCopy(out_buf); Py_DECREF(out_buf); + Py_DECREF(typec); return (PyObject *)result; @@ -1129,6 +1128,7 @@ 
PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) Py_XDECREF(ap2); Py_XDECREF(out_buf); Py_XDECREF(result); + Py_XDECREF(typec); return NULL; } @@ -1140,7 +1140,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) * inverted is set to 1 if computed correlate(ap2, ap1), 0 otherwise */ static PyArrayObject* -_pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, +_pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, PyArray_Descr *typec, int mode, int *inverted) { PyArrayObject *ret; @@ -1200,7 +1200,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, * Need to choose an output array that can hold a sum * -- use priority to determine which subtype. */ - ret = new_array_for_sum(ap1, ap2, NULL, 1, &length, typenum, NULL); + ret = new_array_for_sum(ap1, ap2, NULL, 1, &length, typec, NULL); if (ret == NULL) { return NULL; } @@ -1320,21 +1320,23 @@ NPY_NO_EXPORT PyObject * PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) { PyArrayObject *ap1, *ap2, *ret = NULL; - int typenum; - PyArray_Descr *typec; + PyArray_Descr *typec = NULL; int inverted; int st; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); + if (typec == NULL) { + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); + } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); + Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); @@ -1342,6 +1344,8 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) Py_DECREF(typec); return NULL; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); 
if (ap2 == NULL) { @@ -1358,7 +1362,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) ap2 = cap2; } - ret = _pyarray_correlate(ap1, ap2, typenum, mode, &inverted); + ret = _pyarray_correlate(ap1, ap2, typec, mode, &inverted); if (ret == NULL) { goto clean_ap2; } @@ -1376,6 +1380,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) Py_DECREF(ap1); Py_DECREF(ap2); + Py_DECREF(typec); return (PyObject *)ret; clean_ret: @@ -1384,6 +1389,8 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) Py_DECREF(ap2); clean_ap1: Py_DECREF(ap1); + + Py_DECREF(typec); return NULL; } @@ -1394,20 +1401,22 @@ NPY_NO_EXPORT PyObject * PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) { PyArrayObject *ap1, *ap2, *ret = NULL; - int typenum; int unused; - PyArray_Descr *typec; + PyArray_Descr *typec = NULL; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); + if (typec == NULL) { + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); + } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); + Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); @@ -1415,24 +1424,28 @@ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) Py_DECREF(typec); return NULL; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); if (ap2 == NULL) { goto fail; } - ret = _pyarray_correlate(ap1, ap2, typenum, mode, &unused); + ret = _pyarray_correlate(ap1, ap2, typec, mode, &unused); if (ret == NULL) { goto fail; } Py_DECREF(ap1); Py_DECREF(ap2); + Py_DECREF(typec); return (PyObject *)ret; fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(ret); + 
Py_XDECREF(typec); return NULL; } @@ -1445,10 +1458,9 @@ array_putmask(PyObject *NPY_UNUSED(module), PyObject *const *args, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("putmask", args, len_args, kwnames, - "", NULL, &array, - "mask", NULL, &mask, - "values", NULL, &values, - NULL, NULL, NULL) < 0) { + {"", NULL, &array}, + {"mask", NULL, &mask}, + {"values", NULL, &values}) < 0) { return NULL; } if (!PyArray_Check(array)) { @@ -1681,7 +1693,7 @@ _array_fromobject_generic( flags |= NPY_ARRAY_FORCECAST; ret = (PyArrayObject *)PyArray_CheckFromAny_int( - op, dtype, in_DType, 0, ndmax, flags, NULL); + op, dtype, in_DType, 0, ndmax, flags); finish: Py_XDECREF(dtype); @@ -1720,15 +1732,14 @@ array_array(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("array", args, len_args, kwnames, - "object", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "$copy", &PyArray_CopyConverter, ©, - "$order", &PyArray_OrderConverter, &order, - "$subok", &PyArray_BoolConverter, &subok, - "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, - "$ndmax", &PyArray_PythonPyIntFromInt, &ndmax, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"object", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"$copy", &PyArray_CopyConverter, ©}, + {"$order", &PyArray_OrderConverter, &order}, + {"$subok", &PyArray_BoolConverter, &subok}, + {"$ndmin", &PyArray_PythonPyIntFromInt, &ndmin}, + {"$ndmax", &PyArray_PythonPyIntFromInt, &ndmax}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1776,13 +1787,12 @@ array_asarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$copy", 
&PyArray_CopyConverter, ©, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$copy", &PyArray_CopyConverter, ©}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1822,13 +1832,12 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asanyarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$copy", &PyArray_CopyConverter, ©, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$copy", &PyArray_CopyConverter, ©}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1866,10 +1875,9 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("ascontiguousarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1908,10 +1916,9 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asfortranarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "$like", NULL, &like, - NULL, NULL, NULL) < 
0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1949,11 +1956,10 @@ array_copyto(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("copyto", args, len_args, kwnames, - "dst", NULL, &dst_obj, - "src", NULL, &src_obj, - "|casting", &PyArray_CastingConverter, &casting, - "|where", NULL, &wheremask_in, - NULL, NULL, NULL) < 0) { + {"dst", NULL, &dst_obj}, + {"src", NULL, &src_obj}, + {"|casting", &PyArray_CastingConverter, &casting}, + {"|where", NULL, &wheremask_in}) < 0) { goto fail; } @@ -1973,9 +1979,16 @@ array_copyto(PyObject *NPY_UNUSED(ignored), Py_INCREF(DType); if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &DType)) { /* The user passed a Python scalar */ - PyArray_Descr *descr = npy_find_descr_for_scalar( - src_obj, PyArray_DESCR(src), DType, - NPY_DTYPE(PyArray_DESCR(dst))); + PyArray_Descr *descr; + PyArray_DTypeMeta *dst_DType = NPY_DTYPE(PyArray_DESCR(dst)); + bool is_npy_nan = PyFloat_Check(src_obj) && npy_isnan(PyFloat_AsDouble(src_obj)); + if (!is_npy_nan && dst_DType->type_num == NPY_TIMEDELTA) { + descr = PyArray_DESCR(dst); + Py_INCREF(descr); + } else { + descr = npy_find_descr_for_scalar(src_obj, PyArray_DESCR(src), DType, + dst_DType); + } Py_DECREF(DType); if (descr == NULL) { goto fail; @@ -2032,12 +2045,11 @@ array_empty(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("empty", args, len_args, kwnames, - "shape", &PyArray_IntpConverter, &shape, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"shape", &PyArray_IntpConverter, &shape}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", 
&PyArray_DeviceConverterOptional, &device}, + {"$like", NULL, &like}) < 0) { goto fail; } @@ -2091,13 +2103,12 @@ array_empty_like(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("empty_like", args, len_args, kwnames, - "prototype", &PyArray_Converter, &prototype, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "|subok", &PyArray_PythonPyIntFromInt, &subok, - "|shape", &PyArray_OptionalIntpConverter, &shape, - "$device", &PyArray_DeviceConverterOptional, &device, - NULL, NULL, NULL) < 0) { + {"prototype", &PyArray_Converter, &prototype}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"|subok", &PyArray_PythonPyIntFromInt, &subok}, + {"|shape", &PyArray_OptionalIntpConverter, &shape}, + {"$device", &PyArray_DeviceConverterOptional, &device}) < 0) { goto fail; } /* steals the reference to dt_info.descr if it's not NULL */ @@ -2237,12 +2248,11 @@ array_zeros(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("zeros", args, len_args, kwnames, - "shape", &PyArray_IntpConverter, &shape, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"shape", &PyArray_IntpConverter, &shape}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$like", NULL, &like}) < 0) { goto finish; } @@ -2289,8 +2299,7 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_ NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("count_nonzero", args, len_args, NULL, - "", PyArray_Converter, &array, - NULL, NULL, NULL) < 0) { + {"", PyArray_Converter, &array}) < 0) { return NULL; } @@ -2305,7 +2314,9 @@ 
array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_ if (descr == NULL) { return NULL; } - return PyArray_Scalar(&count, descr, NULL); + PyObject *result = PyArray_Scalar(&count, descr, NULL); + Py_DECREF(descr); + return result; } static PyObject * @@ -2338,6 +2349,7 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds if (sep == NULL || strlen(sep) == 0) { PyErr_SetString(PyExc_ValueError, "The binary mode of fromstring is removed, use frombuffer instead"); + Py_XDECREF(descr); return NULL; } return PyArray_FromString(data, (npy_intp)s, descr, (npy_intp)nin, sep); @@ -2406,6 +2418,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) } if (npy_fseek(fp, offset, SEEK_CUR) != 0) { PyErr_SetFromErrno(PyExc_OSError); + Py_XDECREF(type); goto cleanup; } if (type == NULL) { @@ -2508,12 +2521,11 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("concatenate", args, len_args, kwnames, - "seq", NULL, &a0, - "|axis", &PyArray_AxisConverter, &axis, - "|out", NULL, &out, - "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", &PyArray_CastingConverter, &casting, - NULL, NULL, NULL) < 0) { + {"seq", NULL, &a0}, + {"|axis", &PyArray_AxisConverter, &axis}, + {"|out", NULL, &out}, + {"$dtype", &PyArray_DescrConverter2, &dtype}, + {"$casting", &PyArray_CastingConverter, &casting}) < 0) { return NULL; } if (out != NULL) { @@ -2539,9 +2551,8 @@ array_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_ NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("innerproduct", args, len_args, NULL, - "", NULL, &a0, - "", NULL, &b0, - NULL, NULL, NULL) < 0) { + {"", NULL, &a0}, + {"", NULL, &b0}) < 0) { return NULL; } @@ -2557,10 +2568,9 @@ array_matrixproduct(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dot", args, len_args, kwnames, - "a", NULL, &a, - "b", NULL, &v, - "|out", NULL, &o, - NULL, NULL, 
NULL) < 0) { + {"a", NULL, &a}, + {"b", NULL, &v}, + {"|out", NULL, &o}) < 0) { return NULL; } if (o != NULL) { @@ -2580,22 +2590,20 @@ array_matrixproduct(PyObject *NPY_UNUSED(dummy), static PyObject * array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_args) { - int typenum; char *ip1, *ip2, *op; npy_intp n, stride1, stride2; PyObject *op1, *op2; npy_intp newdimptr[1] = {-1}; PyArray_Dims newdims = {newdimptr, 1}; PyArrayObject *ap1 = NULL, *ap2 = NULL, *ret = NULL; - PyArray_Descr *type; + PyArray_Descr *type = NULL; PyArray_DotFunc *vdot; NPY_BEGIN_THREADS_DEF; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("vdot", args, len_args, NULL, - "", NULL, &op1, - "", NULL, &op2, - NULL, NULL, NULL) < 0) { + {"", NULL, &op1}, + {"", NULL, &op2}) < 0) { return NULL; } @@ -2603,16 +2611,19 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar * Conjugating dot product using the BLAS for vectors. * Flattens both op1 and op2 before dotting. 
*/ - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &type) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &type) < 0) { + Py_XDECREF(type); return NULL; } - type = PyArray_DescrFromType(typenum); + if (type == NULL) { + type = PyArray_DescrFromType(NPY_DEFAULT_TYPE); + } + Py_SETREF(type, NPY_DT_CALL_ensure_canonical(type)); + Py_INCREF(type); ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0, NULL); if (ap1 == NULL) { @@ -2628,6 +2639,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar Py_DECREF(ap1); ap1 = (PyArrayObject *)op1; + Py_INCREF(type); ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0, NULL); if (ap2 == NULL) { goto fail; @@ -2646,7 +2658,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar } /* array scalar output */ - ret = new_array_for_sum(ap1, ap2, NULL, 0, (npy_intp *)NULL, typenum, NULL); + ret = new_array_for_sum(ap1, ap2, NULL, 0, (npy_intp *)NULL, type, NULL); if (ret == NULL) { goto fail; } @@ -2658,7 +2670,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar ip2 = PyArray_DATA(ap2); op = PyArray_DATA(ret); - switch (typenum) { + switch (type->type_num) { case NPY_CFLOAT: vdot = (PyArray_DotFunc *)CFLOAT_vdot; break; @@ -2691,11 +2703,13 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar Py_XDECREF(ap1); Py_XDECREF(ap2); + Py_XDECREF(type); return PyArray_Return(ret); fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(ret); + Py_XDECREF(type); return NULL; } @@ -2979,11 +2993,10 @@ array_einsum(PyObject *NPY_UNUSED(dummy), /* Get the keyword arguments */ if (kwnames != NULL) { if (npy_parse_arguments("einsum", args+nargs, 0, kwnames, - "$out", NULL, &out_obj, - "$order", &PyArray_OrderConverter, &order, - "$casting", 
&PyArray_CastingConverter, &casting, - "$dtype", &PyArray_DescrConverter2, &dtype, - NULL, NULL, NULL) < 0) { + {"$out", NULL, &out_obj}, + {"$order", &PyArray_OrderConverter, &order}, + {"$casting", &PyArray_CastingConverter, &casting}, + {"$dtype", &PyArray_DescrConverter2, &dtype}) < 0) { goto finish; } if (out_obj != NULL && !PyArray_Check(out_obj)) { @@ -3025,10 +3038,9 @@ array_correlate(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("correlate", args, len_args, kwnames, - "a", NULL, &a0, - "v", NULL, &shape, - "|mode", &PyArray_CorrelatemodeConverter, &mode, - NULL, NULL, NULL) < 0) { + {"a", NULL, &a0}, + {"v", NULL, &shape}, + {"|mode", &PyArray_CorrelatemodeConverter, &mode}) < 0) { return NULL; } return PyArray_Correlate(a0, shape, mode); @@ -3043,10 +3055,9 @@ array_correlate2(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("correlate2", args, len_args, kwnames, - "a", NULL, &a0, - "v", NULL, &shape, - "|mode", &PyArray_CorrelatemodeConverter, &mode, - NULL, NULL, NULL) < 0) { + {"a", NULL, &a0}, + {"v", NULL, &shape}, + {"|mode", &PyArray_CorrelatemodeConverter, &mode}) < 0) { return NULL; } return PyArray_Correlate2(a0, shape, mode); @@ -3063,13 +3074,12 @@ array_arange(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("arange", args, len_args, kwnames, - "|start", NULL, &o_start, - "|stop", NULL, &o_stop, - "|step", NULL, &o_step, - "|dtype", &PyArray_DescrConverter2, &typecode, - "$device", &PyArray_DeviceConverterOptional, &device, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"|start", NULL, &o_start}, + {"|stop", NULL, &o_stop}, + {"|step", NULL, &o_step}, + {"|dtype", &PyArray_DescrConverter2, &typecode}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(typecode); return NULL; } @@ -3226,6 +3236,7 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) PyArrayObject *arr = NULL, *ax = NULL, *ay 
= NULL; PyObject *ret = NULL; PyArray_Descr *common_dt = NULL; + NpyIter *iter = NULL; arr = (PyArrayObject *)PyArray_FROM_O(condition); if (arr == NULL) { @@ -3273,6 +3284,21 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) if (common_dt == NULL) { goto fail; } + + if (PyArray_FLAGS(ax) & NPY_ARRAY_WAS_PYTHON_LITERAL) { + if (npy_update_operand_for_scalar(&ax, x, common_dt, NPY_SAFE_CASTING) < 0) { + goto fail; + } + op_in[2] = ax; + } + + if (PyArray_FLAGS(ay) & NPY_ARRAY_WAS_PYTHON_LITERAL) { + if (npy_update_operand_for_scalar(&ay, y, common_dt, NPY_SAFE_CASTING) < 0) { + goto fail; + } + op_in[3] = ay; + } + npy_intp itemsize = common_dt->elsize; // If x and y don't have references, we ask the iterator to create buffers @@ -3295,7 +3321,6 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) /* `PyArray_DescrFromType` cannot fail for simple builtin types: */ PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), x_dt, y_dt}; - NpyIter * iter; NPY_BEGIN_THREADS_DEF; iter = NpyIter_MultiNew( @@ -3429,6 +3454,9 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_XDECREF(common_dt); NPY_cast_info_xfree(&x_cast_info); NPY_cast_info_xfree(&y_cast_info); + if (iter != NULL) { + NpyIter_Deallocate(iter); + } return NULL; } @@ -3441,10 +3469,9 @@ array_where(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("where", args, len_args, NULL, - "", NULL, &obj, - "|x", NULL, &x, - "|y", NULL, &y, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"|x", NULL, &x}, + {"|y", NULL, &y}) < 0) { return NULL; } @@ -3460,9 +3487,8 @@ array_lexsort(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t l NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("lexsort", args, len_args, kwnames, - "keys", NULL, &obj, - "|axis", PyArray_PythonPyIntFromInt, &axis, - NULL, NULL, NULL) < 0) { + {"keys", NULL, &obj}, + {"|axis", PyArray_PythonPyIntFromInt, &axis}) < 0) 
{ return NULL; } return PyArray_Return((PyArrayObject *)PyArray_LexSort(obj, axis)); @@ -3481,10 +3507,9 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("can_cast", args, len_args, kwnames, - "from_", NULL, &from_obj, - "to", &PyArray_DescrConverter2, &d2, - "|casting", &PyArray_CastingConverter, &casting, - NULL, NULL, NULL) < 0) { + {"from_", NULL, &from_obj}, + {"to", &PyArray_DescrConverter2, &d2}, + {"|casting", &PyArray_CastingConverter, &casting}) < 0) { goto finish; } if (d2 == NULL) { @@ -3551,9 +3576,8 @@ array_promote_types(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("promote_types", args, len_args, NULL, - "", PyArray_DescrConverter2, &d1, - "", PyArray_DescrConverter2, &d2, - NULL, NULL, NULL) < 0) { + {"", PyArray_DescrConverter2, &d1}, + {"", PyArray_DescrConverter2, &d2}) < 0) { goto finish; } @@ -3733,15 +3757,14 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dragon4_scientific", args, len_args, kwnames, - "x", NULL , &obj, - "|precision", &PyArray_PythonPyIntFromInt, &precision, - "|unique", &PyArray_PythonPyIntFromInt, &unique, - "|sign", &PyArray_PythonPyIntFromInt, &sign, - "|trim", &trimmode_converter, &trim, - "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, - "|exp_digits", &PyArray_PythonPyIntFromInt, &exp_digits, - "|min_digits", &PyArray_PythonPyIntFromInt, &min_digits, - NULL, NULL, NULL) < 0) { + {"x", NULL, &obj}, + {"|precision", &PyArray_PythonPyIntFromInt, &precision}, + {"|unique", &PyArray_PythonPyIntFromInt, &unique}, + {"|sign", &PyArray_PythonPyIntFromInt, &sign}, + {"|trim", &trimmode_converter, &trim}, + {"|pad_left", &PyArray_PythonPyIntFromInt, &pad_left}, + {"|exp_digits", &PyArray_PythonPyIntFromInt, &exp_digits}, + {"|min_digits", &PyArray_PythonPyIntFromInt, &min_digits}) < 0) { return NULL; } @@ -3776,16 +3799,15 @@ dragon4_positional(PyObject 
*NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dragon4_positional", args, len_args, kwnames, - "x", NULL , &obj, - "|precision", &PyArray_PythonPyIntFromInt, &precision, - "|unique", &PyArray_PythonPyIntFromInt, &unique, - "|fractional", &PyArray_PythonPyIntFromInt, &fractional, - "|sign", &PyArray_PythonPyIntFromInt, &sign, - "|trim", &trimmode_converter, &trim, - "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, - "|pad_right", &PyArray_PythonPyIntFromInt, &pad_right, - "|min_digits", &PyArray_PythonPyIntFromInt, &min_digits, - NULL, NULL, NULL) < 0) { + {"x", NULL, &obj}, + {"|precision", &PyArray_PythonPyIntFromInt, &precision}, + {"|unique", &PyArray_PythonPyIntFromInt, &unique}, + {"|fractional", &PyArray_PythonPyIntFromInt, &fractional}, + {"|sign", &PyArray_PythonPyIntFromInt, &sign}, + {"|trim", &trimmode_converter, &trim}, + {"|pad_left", &PyArray_PythonPyIntFromInt, &pad_left}, + {"|pad_right", &PyArray_PythonPyIntFromInt, &pad_right}, + {"|min_digits", &PyArray_PythonPyIntFromInt, &min_digits}) < 0) { return NULL; } @@ -4313,10 +4335,9 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("normalize_axis_index", args, len_args, kwnames, - "axis", &PyArray_PythonPyIntFromInt, &axis, - "ndim", &PyArray_PythonPyIntFromInt, &ndim, - "|msg_prefix", NULL, &msg_prefix, - NULL, NULL, NULL) < 0) { + {"axis", &PyArray_PythonPyIntFromInt, &axis}, + {"ndim", &PyArray_PythonPyIntFromInt, &ndim}, + {"|msg_prefix", NULL, &msg_prefix}) < 0) { return NULL; } if (check_and_adjust_axis_msg(&axis, ndim, msg_prefix) < 0) { @@ -4383,7 +4404,7 @@ _populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) buffer_data = PyArray_BYTES(buffer_array); npy_intp elsize = PyArray_DESCR(buffer_array)->elsize; - for (int i = 0; i < n_finfo_constants; i++) + for (int i = 0; i < n_finfo_constants; i++) { PyObject *value_obj; if (!finfo_constants[i].is_int) { @@ -4436,8 +4457,8 @@ 
_set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) if (res < 0) { return NULL; } - int old_value = npy_thread_unsafe_state.warn_if_no_mem_policy; - npy_thread_unsafe_state.warn_if_no_mem_policy = res; + int old_value = npy_global_state.warn_if_no_mem_policy; + npy_global_state.warn_if_no_mem_policy = res; if (old_value) { Py_RETURN_TRUE; } @@ -4468,7 +4489,6 @@ _blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *arg) { static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { -#if !defined(PYPY_VERSION) if (PyThreadState_Get()->interp != PyInterpreterState_Main()) { if (PyErr_WarnEx(PyExc_UserWarning, "NumPy was imported from a Python sub-interpreter but " @@ -4483,11 +4503,10 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - npy_thread_unsafe_state.reload_guard_initialized = 1; + npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } -#endif - if (npy_thread_unsafe_state.reload_guard_initialized) { + if (npy_global_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). 
" "This can in some cases result in small but subtle issues " @@ -4495,7 +4514,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - npy_thread_unsafe_state.reload_guard_initialized = 1; + npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4698,8 +4717,6 @@ static struct PyMethodDef array_module_methods[] = { {"_set_numpy_warn_if_no_mem_policy", (PyCFunction)_set_numpy_warn_if_no_mem_policy, METH_O, "Change the warn if no mem policy flag for testing."}, - {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, - METH_VARARGS, NULL}, {"_get_sfloat_dtype", get_sfloat_dtype, METH_NOARGS, NULL}, {"_get_madvise_hugepage", (PyCFunction)_get_madvise_hugepage, @@ -4886,16 +4903,16 @@ set_flaginfo(PyObject *d) } // static variables are automatically zero-initialized -NPY_VISIBILITY_HIDDEN npy_thread_unsafe_state_struct npy_thread_unsafe_state; +NPY_VISIBILITY_HIDDEN npy_global_state_struct npy_global_state; static int -initialize_thread_unsafe_state(void) { +initialize_global_state(void) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { - npy_thread_unsafe_state.warn_if_no_mem_policy = 1; + npy_global_state.warn_if_no_mem_policy = 1; } else { - npy_thread_unsafe_state.warn_if_no_mem_policy = 0; + npy_global_state.warn_if_no_mem_policy = 0; } return 0; @@ -4954,7 +4971,7 @@ _multiarray_umath_exec(PyObject *m) { return -1; } - if (initialize_thread_unsafe_state() < 0) { + if (initialize_global_state() < 0) { return -1; } @@ -5172,7 +5189,13 @@ _multiarray_umath_exec(PyObject *m) { if (PyDataMem_DefaultHandler == NULL) { return -1; } - +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(PyDataMem_DefaultHandler) == 0) { + PyErr_SetString(PyExc_RuntimeError, + "Could not mark memory handler capsule as immortal"); + return -1; + } +#endif /* * Initialize the context-local current handler * with the default PyDataMem_Handler capsule. 
@@ -5206,6 +5229,16 @@ _multiarray_umath_exec(PyObject *m) { if (npy_static_pydata.ndarray_array_function == NULL) { return -1; } + npy_static_pydata.ndarray_set_dtype = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "_set_dtype"); + if (npy_static_pydata.ndarray_set_dtype == NULL) { + return -1; + } + npy_static_pydata.ndarray_dtype_descr = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "dtype"); + if (npy_static_pydata.ndarray_dtype_descr == NULL) { + return -1; + } /* * Initialize np.dtypes.StringDType diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index de234a8495d3..7b1da2ea9765 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -2,60 +2,14 @@ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ /* - * A struct storing thread-unsafe global state for the _multiarray_umath - * module. We should refactor so the global state is thread-safe, - * e.g. by adding locking. + * A struct storing global state for the _multiarray_umath + * module. The state is initialized when the module is imported + * so no locking is necessary to access it. + * + * These globals will need to move to per-module state to + * support reloading or subinterpreters. */ -typedef struct npy_thread_unsafe_state_struct { - /* - * Cached references to objects obtained via an import. All of these are - * can be initialized at any time by npy_cache_import. - * - * Currently these are not initialized in a thread-safe manner but the - * failure mode is a reference leak for references to imported immortal - * modules so it will never lead to a crash unless users are doing something - * janky that we don't support like reloading. 
- * - * TODO: maybe make each entry a struct that looks like: - * - * struct { - * atomic_int initialized; - * PyObject *value; - * } - * - * so the initialization is thread-safe and the only possible lock - * contention happens before the cache is initialized, not on every single - * read. - */ - PyObject *_add_dtype_helper; - PyObject *_all; - PyObject *_amax; - PyObject *_amin; - PyObject *_any; - PyObject *array_function_errmsg_formatter; - PyObject *array_ufunc_errmsg_formatter; - PyObject *_clip; - PyObject *_commastring; - PyObject *_convert_to_stringdtype_kwargs; - PyObject *_default_array_repr; - PyObject *_default_array_str; - PyObject *_dump; - PyObject *_dumps; - PyObject *_getfield_is_safe; - PyObject *internal_gcd_func; - PyObject *_mean; - PyObject *NO_NEP50_WARNING; - PyObject *npy_ctypes_check; - PyObject *numpy_matrix; - PyObject *_prod; - PyObject *_promote_fields; - PyObject *_std; - PyObject *_sum; - PyObject *_ufunc_doc_signature_formatter; - PyObject *_var; - PyObject *_view_is_safe; - PyObject *_void_scalar_to_string; - +typedef struct npy_global_state_struct { /* * Used to test the internal-only scaled float test dtype */ @@ -76,11 +30,10 @@ typedef struct npy_thread_unsafe_state_struct { * if there is no memory policy set */ int warn_if_no_mem_policy; - -} npy_thread_unsafe_state_struct; +} npy_global_state_struct; -NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_state; +NPY_VISIBILITY_HIDDEN extern npy_global_state_struct npy_global_state; NPY_NO_EXPORT int get_legacy_print_mode(void); diff --git a/numpy/_core/src/multiarray/nditer_api.c b/numpy/_core/src/multiarray/nditer_api.c index da58489c6b9d..e0baeeeadb53 100644 --- a/numpy/_core/src/multiarray/nditer_api.c +++ b/numpy/_core/src/multiarray/nditer_api.c @@ -1738,7 +1738,12 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg) */ if (!(flags&NPY_OP_ITFLAG_BUFNEVER)) { npy_intp itemsize = op_dtype[iop]->elsize; - buffer = 
PyArray_malloc(itemsize*buffersize); + npy_intp alloc_size; + buffer = NULL; + if (!npy_mul_sizes_with_overflow( + &alloc_size, itemsize, buffersize)) { + buffer = PyArray_malloc(alloc_size); + } if (buffer == NULL) { if (errmsg == NULL) { PyErr_NoMemory(); @@ -1749,7 +1754,7 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg) goto fail; } if (PyDataType_FLAGCHK(op_dtype[iop], NPY_NEEDS_INIT)) { - memset(buffer, '\0', itemsize*buffersize); + memset(buffer, '\0', alloc_size); } buffers[iop] = buffer; } diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index ffe37e80c9be..4e6324c2ad64 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -538,6 +538,10 @@ NpyIter_Copy(NpyIter *iter) /* Allocate memory for the new iterator */ size = NIT_SIZEOF_ITERATOR(itflags, ndim, nop); newiter = (NpyIter*)PyObject_Malloc(size); + if (newiter == NULL) { + PyErr_NoMemory(); + return NULL; + } /* Copy the raw values to the new iterator */ memcpy(newiter, iter, size); diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index a0e1b09a584c..992bc013af3a 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -1423,7 +1423,10 @@ npyiter_enable_external_loop( return NULL; } - NpyIter_EnableExternalLoop(self->iter); + if (NpyIter_EnableExternalLoop(self->iter) != NPY_SUCCEED) { + return NULL; + } + /* EnableExternalLoop invalidates cached values */ if (npyiter_cache_values(self) < 0) { return NULL; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 997c798c665d..40dfc2dddaf7 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -69,6 +69,12 @@ intern_strings(void) INTERN_STRING(copy, "copy"); INTERN_STRING(dl_device, "dl_device"); INTERN_STRING(max_version, 
"max_version"); + INTERN_STRING(array_dealloc, "array_dealloc"); + INTERN_STRING(real, "real"); + INTERN_STRING(imag, "imag"); + INTERN_STRING(sort, "sort"); + INTERN_STRING(argsort, "argsort"); + INTERN_STRING(_set_dtype, "_set_dtype"); return 0; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index f3d1135ec044..abf98b5f0c09 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -48,6 +48,12 @@ typedef struct npy_interned_str_struct { PyObject *copy; PyObject *dl_device; PyObject *max_version; + PyObject *array_dealloc; + PyObject *real; + PyObject *imag; + PyObject *sort; + PyObject *argsort; + PyObject *_set_dtype; } npy_interned_str_struct; /* @@ -85,6 +91,13 @@ typedef struct npy_static_pydata_struct { PyObject *ndarray_array_finalize; PyObject *ndarray_array_function; + /* + * References to ndarray._set_dtype and ndarray.dtype descriptor, + * used in PyArray_View to detect subclass overrides. 
+ */ + PyObject *ndarray_set_dtype; + PyObject *ndarray_dtype_descr; + /* * References to the '1' and '0' PyLong objects */ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index de4012641684..83c9b9ffad1a 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -122,6 +122,8 @@ _PyArray_SetNumericOps(PyObject *dict) SET(conjugate); SET(matmul); SET(clip); + SET(real); + SET(imag); // initialize static globals needed for matmul npy_static_pydata.axes_1d_obj_kwargs = Py_BuildValue( @@ -146,6 +148,9 @@ _get_keywords(int rtype, PyArrayObject *out) PyObject *kwds = NULL; if (rtype != NPY_NOTYPE || out != NULL) { kwds = PyDict_New(); + if (kwds == NULL) { + return NULL; + } if (rtype != NPY_NOTYPE) { PyArray_Descr *descr; descr = PyArray_DescrFromType(rtype); @@ -169,13 +174,16 @@ PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, PyObject *kwds; args = Py_BuildValue("(Oi)", m1, axis); + if (args == NULL) { + return NULL; + } kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "reduce"); if (meth && PyCallable_Check(meth)) { ret = PyObject_Call(meth, args, kwds); } Py_DECREF(args); - Py_DECREF(meth); + Py_XDECREF(meth); Py_XDECREF(kwds); return ret; } @@ -189,13 +197,16 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, PyObject *kwds; args = Py_BuildValue("(Oi)", m1, axis); + if (args == NULL) { + return NULL; + } kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "accumulate"); if (meth && PyCallable_Check(meth)) { ret = PyObject_Call(meth, args, kwds); } Py_DECREF(args); - Py_DECREF(meth); + Py_XDECREF(meth); Py_XDECREF(kwds); return ret; } diff --git a/numpy/_core/src/multiarray/number.h b/numpy/_core/src/multiarray/number.h index 8cbcf5f2da25..497793abd7a8 100644 --- a/numpy/_core/src/multiarray/number.h +++ b/numpy/_core/src/multiarray/number.h @@ -41,6 +41,8 @@ typedef struct { PyObject *conjugate; PyObject 
*matmul; PyObject *clip; + PyObject *real; + PyObject *imag; } NumericOps; extern NPY_NO_EXPORT NumericOps n_ops; diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index ac70f38f39a5..9c5a15da03f5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -124,7 +124,7 @@ PyArray_ZeroContiguousBuffer( * and only arrays which own their memory should clear it. */ int aligned = PyArray_ISALIGNED(arr); - if (PyArray_ISCONTIGUOUS(arr)) { + if (PyArray_ISCONTIGUOUS(arr) || PyArray_IS_F_CONTIGUOUS(arr)) { return PyArray_ClearBuffer( descr, PyArray_BYTES(arr), descr->elsize, PyArray_SIZE(arr), aligned); @@ -152,10 +152,12 @@ PyArray_ZeroContiguousBuffer( /* Process the innermost dimension */ if (clear_info.func(NULL, clear_info.descr, data_it, inner_shape, inner_stride, clear_info.auxdata) < 0) { + NPY_traverse_info_xfree(&clear_info); return -1; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape_it, data_it, strides_it); + NPY_traverse_info_xfree(&clear_info); return 0; } diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index a602e312727b..08923508d601 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -346,7 +346,7 @@ PyArray_DescrFromTypeObject(PyObject *type) _PyArray_LegacyDescr *conv = NULL; int res = PyObject_GetOptionalAttr(type, npy_interned_str.dtype, &attr); if (res < 0) { - return NULL; // Should be a rather criticial error, so just fail. + return NULL; // Should be a rather critical error, so just fail. 
} if (res == 1) { if (!PyArray_DescrCheck(attr)) { @@ -402,67 +402,37 @@ PyArray_DescrFromTypeObject(PyObject *type) NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar(PyObject *sc) { - int type_num; - PyArray_Descr *descr; - - if (PyArray_IsScalar(sc, Void)) { - descr = (PyArray_Descr *)((PyVoidScalarObject *)sc)->descr; - Py_INCREF(descr); - return descr; - } - - if (PyArray_IsScalar(sc, Datetime) || PyArray_IsScalar(sc, Timedelta)) { - PyArray_DatetimeMetaData *dt_data; - - if (PyArray_IsScalar(sc, Datetime)) { - descr = PyArray_DescrNewFromType(NPY_DATETIME); - } - else { - /* Timedelta */ - descr = PyArray_DescrNewFromType(NPY_TIMEDELTA); - } - if (descr == NULL) { - return NULL; - } - dt_data = &(((PyArray_DatetimeDTypeMetaData *)((_PyArray_LegacyDescr *)descr)->c_metadata)->meta); - memcpy(dt_data, &((PyDatetimeScalarObject *)sc)->obmeta, - sizeof(PyArray_DatetimeMetaData)); - - return descr; + /* + * Look up the DType directly from the scalar's type. This avoids calling + * the NPY_DT_default_descr slot (via PyArray_GetDefaultDescr), which for + * parametric dtypes may raise an error or return an incorrect stub. + * Once we have the DType class, discover_descr_from_pyobject extracts the + * correct instance-specific descriptor (handling void, datetime, string, + * and new-style user-defined parametric dtypes correctly). + */ + PyArray_DTypeMeta *DType = + (PyArray_DTypeMeta *)PyArray_DiscoverDTypeFromScalarType(Py_TYPE(sc)); + if (DType != NULL) { + PyArray_Descr *result = NPY_DT_CALL_discover_descr_from_pyobject(DType, sc); + Py_DECREF(DType); + return result; } - descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc)); + /* + * Fallback for scalar subclasses that are not directly in the scalar-type + * registry (e.g. a Python subclass of np.float64). 
These are always + * legacy non-parametric dtypes, so PyArray_DescrFromTypeObject is safe: + * it walks the MRO to find the registered base type and calls + * PyArray_GetDefaultDescr, which works correctly for non-parametric dtypes. + */ + PyArray_Descr *descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc)); if (descr == NULL) { return NULL; } - if (PyDataType_ISLEGACY(descr) && PyDataType_ISUNSIZED(descr)) { - PyArray_DESCR_REPLACE(descr); - if (descr == NULL) { - return NULL; - } - type_num = descr->type_num; - if (type_num == NPY_STRING) { - descr->elsize = PyBytes_GET_SIZE(sc); - } - else if (type_num == NPY_UNICODE) { - descr->elsize = PyUnicode_GET_LENGTH(sc) * 4; - } - else { - _PyArray_LegacyDescr *ldescr = (_PyArray_LegacyDescr *)descr; - PyArray_Descr *dtype; - dtype = (PyArray_Descr *)PyObject_GetAttrString(sc, "dtype"); - if (dtype != NULL) { - descr->elsize = dtype->elsize; - ldescr->fields = PyDataType_FIELDS(dtype); - Py_XINCREF(ldescr->fields); - ldescr->names = PyDataType_NAMES(dtype); - Py_XINCREF(ldescr->names); - Py_DECREF(dtype); - } - PyErr_Clear(); - } - } - return descr; + DType = NPY_DTYPE(descr); + PyArray_Descr *result = NPY_DT_CALL_discover_descr_from_pyobject(DType, sc); + Py_DECREF(descr); + return result; } /*NUMPY_API diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 2d63dd6e3602..684fff1bea1d 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -20,6 +20,7 @@ #include "mapping.h" #include "ctors.h" #include "dtypemeta.h" +#include "descriptor.h" #include "usertypes.h" #include "number.h" #include "numpyos.h" @@ -226,7 +227,7 @@ find_binary_operation_path( */ int was_scalar; PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( - other, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &was_scalar); + other, NULL, NULL, 0, NPY_MAXDIMS, 0, &was_scalar); if (arr == NULL) { return -1; } @@ -955,7 +956,7 @@ 
datetimetype_repr(PyObject *self) ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); } } - + Py_DECREF(meta); } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); @@ -1787,26 +1788,78 @@ gentype_shape_get(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) } +static PyObject * +gentype_dataptr_get(PyObject *self) +{ + return Py_BuildValue( + "NO", + PyLong_FromVoidPtr(scalar_value(self, NULL)), + Py_True + ); +} + + static PyObject * gentype_interface_get(PyObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *arr; - PyObject *inter; + PyObject *dataptr = NULL; + PyObject *strides = NULL; + PyObject *shape = NULL; + PyObject *descr = NULL; + PyObject *typestr = NULL; + PyArray_Descr *array_descr = NULL; + PyObject *inter = NULL; - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; + + array_descr = PyArray_DescrFromScalar(self); + if (array_descr == NULL) { + goto finish; } - inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) { - PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + + /* dataptr */ + dataptr = gentype_dataptr_get(self); + if (dataptr == NULL) { + goto finish; } - Py_DECREF(arr); + + /* strides */ + strides = gentype_shape_get(self, NULL); + if (strides == NULL) { + goto finish; + } + + /* descr */ + descr = array_protocol_descr_get(array_descr); + if (descr == NULL) { + goto finish; + } + + /* typestr */ + typestr = arraydescr_protocol_typestr_get(array_descr, NULL); + if (typestr == NULL) { + goto finish; + } + + /* shape */ + shape = gentype_shape_get(self, NULL); + if (shape == NULL) { + goto finish; + } + + inter = build_array_interface(dataptr, descr, strides, typestr, shape); + goto finish; + +finish: + Py_XDECREF(descr); + Py_XDECREF(dataptr); + Py_XDECREF(strides); + Py_XDECREF(shape); + Py_XDECREF(typestr); + Py_XDECREF(array_descr); return inter; } - static PyObject * gentype_typedescr_get(PyObject *self, void 
*NPY_UNUSED(ignored)) { @@ -2075,7 +2128,7 @@ static PyObject * gentype___copy__(PyObject *self) { // scalars are immutable, so we can return a new reference - // the only expections are scalars with void dtype + // the only exceptions are scalars with void dtype if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type)) { // path via array return gentype_generic_method(self, NULL, NULL, "__copy__"); @@ -2089,7 +2142,7 @@ gentype___deepcopy__(PyObject *self, PyObject *args) // note: maybe the signature needs to be updated as __deepcopy__ can accept the keyword memo // scalars are immutable, so we can return a new reference - // the only expections are scalars with void dtype + // the only exceptions are scalars with void dtype // if the number of arguments is not 1, we let gentype_generic_method do the // error handling if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type) || (PyTuple_Size(args)!=1)) { @@ -3631,6 +3684,17 @@ static PyObject * #else ret->obval = 0; #endif + + if (ret->obmeta.base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. 
" + "Please use a specific unit instead.") < 0) { + Py_DECREF(ret); + return NULL; + } + } + } else if (convert_pyobject_to_@name@(&ret->obmeta, obj, NPY_SAME_KIND_CASTING, &ret->obval) < 0) { diff --git a/numpy/_core/src/multiarray/sequence.c b/numpy/_core/src/multiarray/sequence.c index 4c94bb798072..ce2e2059e218 100644 --- a/numpy/_core/src/multiarray/sequence.c +++ b/numpy/_core/src/multiarray/sequence.c @@ -56,7 +56,6 @@ array_concat(PyObject *self, PyObject *other) { /* * Throw a type error, when trying to concat NDArrays - * NOTE: This error is not Thrown when running with PyPy */ PyErr_SetString(PyExc_TypeError, "Concatenation operation is not implemented for NumPy arrays, " diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index fce61ef36e63..bbdc20781dcb 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -77,35 +77,41 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) return -1; } + static const char *msg = + "cannot resize an array that references or is referenced\n" + "by another object in this way.\n" + "Use the np.resize function to get a new resized copy or\n " + "set refcheck=False to disable this check"; if (PyArray_BASE(self) != NULL || (((PyArrayObject_fields *)self)->weakreflist != NULL)) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way. Use the np.resize function."); + PyErr_SetString(PyExc_ValueError, msg); return -1; } if (refcheck) { -#ifdef PYPY_VERSION - PyErr_SetString(PyExc_ValueError, - "cannot resize an array with refcheck=True on PyPy.\n" - "Use the np.resize function or refcheck=False"); - return -1; -#else #if PY_VERSION_HEX >= 0x030E00B0 + // Python 3.14 changed reference counting semantics for function- + // local variables. 
There is no way to tell if the calling function + // has been optimized (because it might be implemented in C or Cython) + // + // Instead, warn if the refcount is exactly 2 that this might be a + // false positive if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { + if (Py_REFCNT(self) == 2) { + PyErr_SetString( + PyExc_ValueError, + "cannot resize an array that may be referenced " + "by another object.\n" + "It is possible that this is a false positive.\n" + "If you are sure that the array is uniquely referenced, " + "set refcheck=False."); + return -1; + } #else if (Py_REFCNT(self) > 2) { #endif - PyErr_SetString( - PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way.\n" - "Use the np.resize function or refcheck=False"); + PyErr_SetString(PyExc_ValueError, msg); return -1; } -#endif /* PYPY_VERSION */ } /* Reallocate space if needed - allocating 0 is forbidden */ PyObject *handler = PyArray_HANDLER(self); @@ -182,6 +188,12 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) * array and it is contiguous. If refcheck is 0, then the reference count is * not checked and assumed to be 1. You still must own this data and have no * weak-references and no base object. + * + * On Python 3.13 and older, the check allows objects with exactly one + * reference to be reallocated in-place. On Python 3.14 and newer, the array + * must be uniquely referenced. In some cases this can lead to spurious + * ValueErrors. 
+ * */ NPY_NO_EXPORT PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 85514ef15df6..60f3db200d32 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -101,6 +101,7 @@ new_stringdtype_instance(PyObject *na_object, int coerce) na_name.buf = PyMem_RawMalloc(size); if (na_name.buf == NULL) { Py_DECREF(na_pystr); + PyErr_NoMemory(); goto fail; } memcpy((char *)na_name.buf, utf8_ptr, size); @@ -399,14 +400,7 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr) } } else { -#ifndef PYPY_VERSION val_obj = PyUnicode_FromStringAndSize(sdata.buf, sdata.size); -#else - // work around pypy issue #4046, can delete this when the fix is in - // a released version of pypy - val_obj = PyUnicode_FromStringAndSize( - sdata.buf == NULL ? "" : sdata.buf, sdata.size); -#endif if (val_obj == NULL) { goto fail; } @@ -692,14 +686,9 @@ stringdtype_wrap_sort_loop( { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)context->descriptors[0]; - PyArray_SortImpl *sort_loop = - ((PyArrayMethod_SortParameters *)context->parameters)->flags - == NPY_SORT_STABLE ? &npy_mergesort_impl : &npy_quicksort_impl; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); - int ret = sort_loop( - data[0], dimensions[0], context, - context->descriptors[0]->elsize, &_sort_compare); + int ret = npy_default_sort_loop(context, data, dimensions, strides, transferdata); NpyString_release_allocator(allocator); return ret; } @@ -737,14 +726,9 @@ stringdtype_wrap_argsort_loop( { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)context->descriptors[0]; - PyArray_ArgSortImpl *argsort_loop = - ((PyArrayMethod_SortParameters *)context->parameters) - ->flags == NPY_SORT_STABLE ? 
&npy_amergesort_impl : &npy_aquicksort_impl; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); - int ret = argsort_loop( - data[0], (npy_intp *)data[1], dimensions[0], context, - context->descriptors[0]->elsize, &_sort_compare); + int ret = npy_default_argsort_loop(context, data, dimensions, strides, transferdata); NpyString_release_allocator(allocator); return ret; } @@ -965,9 +949,10 @@ init_stringdtype_sorts(void) PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; - PyType_Slot sort_slots[3] = { + PyType_Slot sort_slots[4] = { {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {_NPY_METH_static_data, &_sort_compare}, {0, NULL} }; PyArrayMethod_Spec sort_spec = { @@ -989,8 +974,9 @@ init_stringdtype_sorts(void) Py_DECREF(sort_method); PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; - PyType_Slot argsort_slots[2] = { + PyType_Slot argsort_slots[3] = { {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {_NPY_METH_static_data, &_sort_compare}, {0, NULL} }; PyArrayMethod_Spec argsort_spec = { diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index c437fab2d336..84ad9646838e 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -697,6 +697,9 @@ NpyString_dup(const npy_packed_static_string *in, int used_malloc = 0; if (in_allocator == out_allocator && !is_short_string(in)) { in_buf = in_allocator->malloc(size); + if (in_buf == NULL) { + return -1; + } memcpy(in_buf, vstring_buffer(arena, in_u), size); used_malloc = 1; } diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.h b/numpy/_core/src/multiarray/stringdtype/static_string.h index 385d4dc47cce..ac7bd04b3ebb 100644 --- 
a/numpy/_core/src/multiarray/stringdtype/static_string.h +++ b/numpy/_core/src/multiarray/stringdtype/static_string.h @@ -37,7 +37,7 @@ NpyString_free_allocator(npy_string_allocator *allocator); // NPY_EMPTY_STRING into *to_init* is sufficient to initialize it. Does not // check if *to_init* is NULL or if the internal buffer is non-NULL, undefined // behavior or memory leaks are possible if this function is passed a pointer -// to a an uninitialized struct, a NULL pointer, or an existing heap-allocated +// to an uninitialized struct, a NULL pointer, or an existing heap-allocated // string. Returns -1 if allocating the string would exceed the maximum // allowed string size or exhaust available memory. Returns 0 on success. NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index ea6cac08f78b..6a26ee1f7485 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -59,7 +59,7 @@ * supported too by using the appropriate Windows APIs. */ -#if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H && ! 
defined PYPY_VERSION +#if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H #include @@ -113,9 +113,9 @@ find_addr(void * addresses[], npy_intp naddr, void * addr) static int check_unique_temporary(PyObject *lhs) { -#if PY_VERSION_HEX == 0x030E00A7 && !defined(PYPY_VERSION) +#if PY_VERSION_HEX == 0x030E00A7 #error "NumPy is broken on CPython 3.14.0a7, please update to a newer version" -#elif PY_VERSION_HEX >= 0x030E00B1 && !defined(PYPY_VERSION) +#elif PY_VERSION_HEX >= 0x030E00B1 // Python 3.14 changed the semantics for reference counting temporaries // see https://github.com/python/cpython/issues/133164 return PyUnstable_Object_IsUniqueReferencedTemporary(lhs); diff --git a/numpy/_core/src/multiarray/textreading/field_types.c b/numpy/_core/src/multiarray/textreading/field_types.c index 87b86a64940f..0a7ad3b8b815 100644 --- a/numpy/_core/src/multiarray/textreading/field_types.c +++ b/numpy/_core/src/multiarray/textreading/field_types.c @@ -158,6 +158,7 @@ field_type_grow_recursive(PyArray_Descr *descr, field_type *new_ft = PyMem_Realloc(*ft, alloc_size); if (new_ft == NULL) { field_types_xclear(num_field_types, *ft); + PyErr_NoMemory(); return -1; } *ft = new_ft; @@ -195,6 +196,7 @@ field_types_create(PyArray_Descr *descr, field_type **ft) npy_intp ft_size = 4; *ft = PyMem_Malloc(ft_size * sizeof(field_type)); if (*ft == NULL) { + PyErr_NoMemory(); return -1; } return field_type_grow_recursive(descr, 0, ft, &ft_size, 0); diff --git a/numpy/_core/src/multiarray/textreading/readtext.c b/numpy/_core/src/multiarray/textreading/readtext.c index 4df2446302d6..614b5dfc6608 100644 --- a/numpy/_core/src/multiarray/textreading/readtext.c +++ b/numpy/_core/src/multiarray/textreading/readtext.c @@ -208,21 +208,20 @@ _load_from_filelike(PyObject *NPY_UNUSED(mod), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("_load_from_filelike", args, len_args, kwnames, - "file", NULL, &file, - "|delimiter", &parse_control_character, &pc.delimiter, - "|comment", &parse_control_character, 
&pc.comment, - "|quote", &parse_control_character, &pc.quote, - "|imaginary_unit", &parse_control_character, &pc.imaginary_unit, - "|usecols", NULL, &usecols_obj, - "|skiplines", &PyArray_IntpFromPyIntConverter, &skiplines, - "|max_rows", &PyArray_IntpFromPyIntConverter, &max_rows, - "|converters", NULL, &converters, - "|dtype", NULL, &dtype, - "|encoding", NULL, &encoding_obj, - "|filelike", &PyArray_BoolConverter, &filelike, - "|byte_converters", &PyArray_BoolConverter, &pc.python_byte_converters, - "|c_byte_converters", PyArray_BoolConverter, &pc.c_byte_converters, - NULL, NULL, NULL) < 0) { + {"file", NULL, &file}, + {"|delimiter", &parse_control_character, &pc.delimiter}, + {"|comment", &parse_control_character, &pc.comment}, + {"|quote", &parse_control_character, &pc.quote}, + {"|imaginary_unit", &parse_control_character, &pc.imaginary_unit}, + {"|usecols", NULL, &usecols_obj}, + {"|skiplines", &PyArray_IntpFromPyIntConverter, &skiplines}, + {"|max_rows", &PyArray_IntpFromPyIntConverter, &max_rows}, + {"|converters", NULL, &converters}, + {"|dtype", NULL, &dtype}, + {"|encoding", NULL, &encoding_obj}, + {"|filelike", &PyArray_BoolConverter, &filelike}, + {"|byte_converters", &PyArray_BoolConverter, &pc.python_byte_converters}, + {"|c_byte_converters", PyArray_BoolConverter, &pc.c_byte_converters}) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 7f3797b58928..401b8775a4f0 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -186,9 +186,6 @@ read_rows(stream *s, size_t rows_per_block = 1; /* will be increased depending on row size */ npy_intp data_allocated_rows = 0; - /* We give a warning if max_rows is used and an empty line is encountered */ - bool give_empty_row_warning = max_rows >= 0; - int ts_result = 0; tokenizer_state ts; if (npy_tokenizer_init(&ts, pconfig) < 0) { @@ -226,29 +223,8 @@ read_rows(stream *s, 
} current_num_fields = ts.num_fields; field_info *fields = ts.fields; + if (NPY_UNLIKELY(ts.num_fields == 0)) { - /* - * Deprecated NumPy 1.23, 2021-01-13 (not really a deprecation, - * but similar policy should apply to removing the warning again) - */ - /* Tokenizer may give a final "empty line" even if there is none */ - if (give_empty_row_warning && ts_result == 0) { - give_empty_row_warning = false; - if (PyErr_WarnFormat(PyExc_UserWarning, 3, - "Input line %zd contained no data and will not be " - "counted towards `max_rows=%zd`. This differs from " - "the behaviour in NumPy <=1.22 which counted lines " - "rather than rows. If desired, the previous behaviour " - "can be achieved by using `itertools.islice`.\n" - "Please see the 1.23 release notes for an example on " - "how to do this. If you wish to ignore this warning, " - "use `warnings.filterwarnings`. This warning is " - "expected to be removed in the future and is given " - "only once per `loadtxt` call.", - row_count + skiplines + 1, max_rows) < 0) { - goto error; - } - } continue; /* Ignore empty line */ } diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 77083c04c519..c3028d183555 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -1,13 +1,13 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#define HASH_TABLE_INITIAL_BUCKETS 1024 #include #include #include #include #include +#include #include #include #include @@ -15,6 +15,7 @@ #include #include "numpy/arrayobject.h" #include "gil_utils.h" +#include "raii_utils.hpp" extern "C" { #include "fnv.h" #include "npy_argparse.h" @@ -22,23 +23,47 @@ extern "C" { #include "numpy/halffloat.h" } -// This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. 
-// Adapted from https://stackoverflow.com/a/25510879/2536294 -template -struct FinalAction { - FinalAction(F f) : clean_{f} {} - ~FinalAction() { clean_(); } - private: - F clean_; -}; -template -FinalAction finally(F f) { - return FinalAction(f); +// HASH_TABLE_INITIAL_BUCKETS is the reserve hashset capacity used in the +// std::unordered_set instances in the various unique_* functions. +// We use min(input_size, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket +// count: +// - Reserving for all elements (isize) may over-allocate when there are few +// unique values. +// - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps +// memory usage reasonable (4 KiB for pointers). +// See https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 +const npy_intp HASH_TABLE_INITIAL_BUCKETS = 1024; + +// +// Create a 1-d array with the given length that has the same +// dtype as the input `arr`. +// +static inline PyArrayObject * +empty_array_like(PyArrayObject *arr, npy_intp length) +{ + PyArray_Descr *descr = PyArray_DESCR(arr); + Py_INCREF(descr); + + // Create the output array. + PyArrayObject *res_obj = + reinterpret_cast( + PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ) + ); + return res_obj; } template size_t hash_integer(const T *value, npy_bool equal_nan) { - return std::hash{}(*value); + return npy_fnv1a(reinterpret_cast(value), sizeof(T)); } template @@ -183,19 +208,10 @@ static PyObject* unique_numeric(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of numeric (integer or complex). - * This function uses hashing to identify uniqueness efficiently. 
- */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input + * array of numeric (integer or complex). + * This function uses hashing to identify uniqueness efficiently. + */ auto hash = [equal_nan](const T *value) -> size_t { return hash_func(value, equal_nan); @@ -204,55 +220,38 @@ unique_numeric(PyArrayObject *self, npy_bool equal_nan) return equal_func(lhs, rhs, equal_nan); }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). - // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert((T *)idata); - } + using set_type = std::unordered_set; - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. 
- NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + { + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - NPY_DISABLE_C_API; - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. - for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - copy_func(odata, *it); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + copy_func(odata, *it); + } } - return res_obj; + return reinterpret_cast(res_obj); } template @@ -260,23 +259,16 @@ static PyObject* unique_string(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of fixed size strings. - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input + * array of fixed size strings. 
+ * This function uses hashing to identify uniqueness efficiently. + */ + PyArray_Descr *descr = PyArray_DESCR(self); // variables for the string npy_intp itemsize = descr->elsize; npy_intp num_chars = itemsize / sizeof(T); + auto hash = [num_chars](const T *value) -> size_t { return npy_fnv1a(value, num_chars * sizeof(T)); }; @@ -284,77 +276,48 @@ unique_string(PyArrayObject *self, npy_bool equal_nan) return std::memcmp(lhs, rhs, itemsize) == 0; }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). - // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert((T *)idata); - } + using set_type = std::unordered_set; - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. 
- NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - NPY_DISABLE_C_API; - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. - for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - std::memcpy(odata, *it, itemsize); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } } - return res_obj; + return reinterpret_cast(res_obj); } static PyObject* unique_vstring(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array. - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input array. 
+ * This function uses hashing to identify uniqueness efficiently. + */ - // variables for the vstring - npy_string_allocator *in_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)descr); auto hash = [equal_nan](const npy_static_string *value) -> size_t { if (value->buf == NULL) { if (equal_nan) { @@ -382,83 +345,70 @@ unique_vstring(PyArrayObject *self, npy_bool equal_nan) return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). - // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - // unpacked_strings need to be allocated outside of the loop because of the lifetime problem. + npy_intp isize = PyArray_SIZE(self); + // unpacked_strings must live as long as hashset because hashset points + // to values in this vector. std::vector unpacked_strings(isize, {0, NULL}); - for (npy_intp i = 0; i < isize; i++, idata += istride) { - npy_packed_static_string *packed_string = (npy_packed_static_string *)idata; - int is_null = NpyString_load(in_allocator, packed_string, &unpacked_strings[i]); - if (is_null == -1) { - npy_gil_error(PyExc_RuntimeError, - "Failed to load string from packed static string. 
"); - return NULL; + + using set_type = std::unordered_set; + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + PyArray_StringDTypeObject *descr = + reinterpret_cast(PyArray_DESCR(self)); + np::raii::NpyStringAcquireAllocator alloc(descr); + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = + reinterpret_cast(idata); + int is_null = NpyString_load(alloc.allocator(), packed_string, + &unpacked_strings[i]); + if (is_null == -1) { + // Unexpected error. Throw a C++ exception that will be caught + // by the caller of unique_vstring() and converted into a Python + // RuntimeError. + throw std::runtime_error("Failed to load string from packed " + "static string."); + } + hashset.insert(&unpacked_strings[i]); } - hashset.insert(&unpacked_strings[i]); } - NpyString_release_allocator(in_allocator); - - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. 
- NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - PyArray_Descr *res_descr = PyArray_DESCR((PyArrayObject *)res_obj); - Py_INCREF(res_descr); - NPY_DISABLE_C_API; - - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - npy_string_allocator *out_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)res_descr); - auto out_allocator_dealloc = finally([&]() { - NpyString_release_allocator(out_allocator); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. - for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - npy_packed_static_string *packed_string = (npy_packed_static_string *)odata; - int pack_status = 0; - if ((*it)->buf == NULL) { - pack_status = NpyString_pack_null(out_allocator, packed_string); - } else { - pack_status = NpyString_pack(out_allocator, packed_string, (*it)->buf, (*it)->size); - } - if (pack_status == -1) { - // string packing failed - return NULL; + + { + PyArray_StringDTypeObject *res_descr = + reinterpret_cast(PyArray_DESCR(res_obj)); + np::raii::NpyStringAcquireAllocator alloc(res_descr); + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + npy_packed_static_string *packed_string = + reinterpret_cast(odata); + int pack_status = 0; + if ((*it)->buf == NULL) { + pack_status = NpyString_pack_null(alloc.allocator(), packed_string); + } else { + pack_status = NpyString_pack(alloc.allocator(), packed_string, + (*it)->buf, (*it)->size); + } + if (pack_status == -1) { + // string packing failed + return 
NULL; + } } } - - return res_obj; + return reinterpret_cast(res_obj); } @@ -544,29 +494,31 @@ array__unique_hash(PyObject *NPY_UNUSED(module), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, - "arr", &PyArray_Converter, &arr, - "|equal_nan", &PyArray_BoolConverter, &equal_nan, - NULL, NULL, NULL - ) < 0 - ) { + {"arr", (void *)&PyArray_Converter, &arr}, + {"|equal_nan", (void *)&PyArray_BoolConverter, &equal_nan}) < 0) { + Py_XDECREF(arr); return NULL; } + PyObject *result = NULL; try { auto type = PyArray_TYPE(arr); // we only support data types present in our unique_funcs map if (unique_funcs.find(type) == unique_funcs.end()) { - Py_RETURN_NOTIMPLEMENTED; + result = Py_NewRef(Py_NotImplemented); + } + else { + result = unique_funcs[type](arr, equal_nan); } - - return unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); - return NULL; + result = NULL; } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); - return NULL; + result = NULL; } + Py_DECREF(arr); + return result; } diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 71c95a8ae39c..78559fe9c80e 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -179,8 +179,8 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) return -1; } descr_proto->type_num = -1; - if (PyDataType_ISUNSIZED(descr_proto)) { - PyErr_SetString(PyExc_ValueError, "cannot register a" \ + if (descr_proto->elsize == 0) { + PyErr_SetString(PyExc_ValueError, "cannot register a " \ "flexible data-type"); return -1; } diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index f3f091e99fca..6094bebe9f1a 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -64,43 +64,111 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, { using T = 
typename Tag::type; auto cmp = side_to_cmp::value; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - T last_key_val; - if (key_len == 0) { + // If the array length is 0 we return all 0s + if (arr_len <= 0) { + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } return; } - last_key_val = *(const T *)key; - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const T key_val = *(const T *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (cmp(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len; - } + /* + In this binary search implementation, the candidate insertion indices for + the j-th key are in the range [base_j, base_j+length] and on each + iteration we pick a pivot at the mid-point of the range to compare against + the j-th key. Depending on the comparison result, we adjust the base_j and + halve the length of the interval. + + To batch multiple queries, we process all bases with a fixed length. The + length is halved on each iteration of an outer loop and all bases are + updated in an inner loop. To avoid consuming extra memory, we use the + result array to store intermediate values of each base until they become + the final result in the last step. + + There are two benefits of this approach: + + 1. Cache locallity of pivots. In early iterations each key is compared + against the same set of pivots. For example, in the first iteration all + keys are compared against the median. In the second iteration, all keys + end up being compared against 1st and 3rd quartiles. + + 2. Independent calculations for out-of-order execution. In the single-key + version, step i+1 depends on computation of step i. Meaning that step i+1 + must wait for step i to complete before proceeding. 
When batching multiple + keys, we compute each step for all keys before continuing on the next + step. All the computations at a given step are independent across + different keys. Meaning that the CPU can execute multiple keys + out-of-order in parallel. + + Invariant (for every j): + - cmp(arr[i], key_val_j) == true for all i < base_j + - cmp(arr[i], key_val_j) == false for all i >= base_j + length + + where cmp(a, b) operator depends on side input: + - For side = "left", cmp operator is < + - For side = "right", cmp operator is <= + + The insertion index candidates are in range [base, base+length] and + on each iteration we shrink the range into either + [base, ceil(length / 2)] + or + [base + floor(length / 2), ceil(length / 2)] + + The outer loop terminates when length = 1. At that point, for each j + the insertion order is either base_j or base_j + 1. An additional + comparison is required to determine which of the two values. + If cmp(arr[base_j], key_val_j) == true, insertion index is base_j + 1. + Otherwise the insertion order is base_j. + + Optimization: we unroll the first iteration for the following reasons: + 1. ret is not initialized with the bases, so we save |keys| writes + by not having to initialize it with 0s. + 2. By assuming the initial base for every key is 0, we also save + |keys| reads. + 3. In the first iteration, all elements are compared against the + median. So we can store it in a variable and use it for all keys. + + This initial block replaces the initialization loop that is used for the + arr_len==0 case. Note that when arr_len = 1, then half is 0 so the + following block initializes the array as with 0s. 
+ */ + npy_intp interval_length = arr_len; + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) + + npy_intp base = 0; + const T mid_val = *(const T *)(arr + (base + half) * arr_str); + + for (npy_intp i = 0; i < key_len; ++i) { + const T key_val = *(const T *)(key + i * key_str); + *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; + } - last_key_val = key_val; + while (interval_length > 1) { + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const T mid_val = *(const T *)(arr + mid_idx * arr_str); - if (cmp(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + const T mid_val = *(const T *)(arr + (base + half) * arr_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(mid_val, key_val) * half; } - *(npy_intp *)ret = min_idx; + } + + /* + At this point interval_length == 1, so the candidates are in the + interval [base, base + 1]. 
+ + We have two options: + If cmp(arr[base], key_val) == true, insertion index is base + 1 + Otherwise the insertion order is just base + */ + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(*(const T *)(arr + base * arr_str), key_val); } } @@ -113,51 +181,55 @@ argbinsearch(const char *arr, const char *key, const char *sort, char *ret, { using T = typename Tag::type; auto cmp = side_to_cmp::value; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - T last_key_val; - if (key_len == 0) { + // If the array length is 0 we return all 0s + if (arr_len <= 0) { + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } return 0; } - last_key_val = *(const T *)key; - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const T key_val = *(const T *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (cmp(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; - } + npy_intp interval_length = arr_len; + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) - last_key_val = key_val; + npy_intp base = 0; + npy_intp mid_idx = *(npy_intp *)(sort + (base + half) * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { + return -1; + } + const T mid_val = *(const T *)(arr + mid_idx * arr_str); - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx * sort_str); - T mid_val; + for (npy_intp i = 0; i < key_len; ++i) { + const T key_val = *(const T *)(key + i * key_str); + *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; + } - if (sort_idx < 0 || sort_idx >= arr_len) { + while (interval_length > 1) { + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) + + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + npy_intp mid_idx = *(npy_intp *)(sort + (base + half) * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { return -1; } + const T mid_val = *(const T *)(arr + mid_idx * arr_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(mid_val, key_val) * half; + } + } - mid_val = *(const T *)(arr + sort_idx * arr_str); - - if (cmp(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + npy_intp mid_idx = *(npy_intp *)(sort + base * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { + return -1; } - *(npy_intp *)ret = min_idx; + const T key_val = *(const T *)(key + i * key_str); + base += cmp(*(const T *)(arr + mid_idx * arr_str), key_val); } return 0; } diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index 2f5adde17b64..3371c02aef49 100644 --- a/numpy/_core/src/npysort/quicksort.cpp 
+++ b/numpy/_core/src/npysort/quicksort.cpp @@ -79,7 +79,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if !defined(__CYGWIN__) using TF = typename np::meta::FixedWidth::Type; void (*dispfunc)(TF*, intptr_t) = nullptr; - if (sizeof(T) == sizeof(uint16_t)) { + if constexpr (sizeof(T) == sizeof(uint16_t)) { #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); @@ -88,7 +88,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif } - else if (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { + else if constexpr (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 6a7a01da4b0d..5adb33411f3c 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 6a7a01da4b0dfde108aa626a2364c954e2c50fe1 +Subproject commit 5adb33411f3cea8bdbafa9d91bd75bc4bf19c7dd diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index d257bc22d051..c00c8468b651 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1097,7 +1097,7 @@ rational_ufunc_test_add_rationals(char** args, npy_intp const *dimensions, } -PyMethodDef module_methods[] = { +static PyMethodDef module_methods[] = { {0} /* sentinel */ }; diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 020e903b5fc8..d26cd9b9b7ca 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ 
b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -1026,8 +1026,9 @@ sfloat_init_ufuncs(void) { PyUFunc_LoopSlot loops[] = { {"multiply", &multiply_spec}, {"_core._multiarray_umath.add", &add_spec}, - {"numpy:sort", &sort_spec}, - {"numpy._core.fromnumeric:argsort", &argsort_spec}, + // These names must match exactly right now (not ufuncs) + {"sort", &sort_spec}, + {"argsort", &argsort_spec}, {NULL, NULL} }; if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { @@ -1077,7 +1078,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - if (npy_thread_unsafe_state.get_sfloat_dtype_initialized) { + if (npy_global_state.get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -1106,6 +1107,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; + npy_global_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index a1b64ecc0444..a37da4cd98bb 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -857,6 +857,72 @@ static void *const conv1d_full_data[] = {NULL}; static const char conv1d_full_typecodes[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; +/* + * Test helpers for PyUFunc_ReplaceLoopBySignature. + * + * _constant42_loop: unary float64 loop that always writes 42.0. + */ +static void +_constant42_loop(char **args, npy_intp const *dimensions, + npy_intp const *steps, void *NPY_UNUSED(data)) +{ + npy_intp n = dimensions[0]; + char *out = args[1]; + npy_intp out_step = steps[1]; + for (npy_intp i = 0; i < n; i++) { + *(double *)out = 42.0; + out += out_step; + } +} + +/* + * replace_loop(ufunc): Replace the dd->d loop with _constant42_loop. + * Only works for unary ufuncs. 
Returns a capsule holding the old loop. + */ +static PyObject * +UMath_Tests_replace_loop(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + PyUFuncObject *ufunc; + if (!PyArg_ParseTuple(args, "O!", &PyUFunc_Type, &ufunc)) { + return NULL; + } + if (ufunc->nin != 1 || ufunc->nout != 1) { + PyErr_SetString(PyExc_ValueError, + "replace_loop only supports unary ufuncs"); + return NULL; + } + int signature[2] = {NPY_DOUBLE, NPY_DOUBLE}; + PyUFuncGenericFunction oldfunc = NULL; + if (PyUFunc_ReplaceLoopBySignature( + ufunc, _constant42_loop, signature, &oldfunc) < 0) { + PyErr_SetString(PyExc_RuntimeError, + "failed to find a float64 loop"); + return NULL; + } + return PyCapsule_New((void *)oldfunc, "oldfunc", NULL); +} + +/* + * restore_loop(ufunc, capsule): Restore the loop saved by replace_loop. + */ +static PyObject * +UMath_Tests_restore_loop(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + PyUFuncObject *ufunc; + PyObject *capsule; + if (!PyArg_ParseTuple(args, "O!O", &PyUFunc_Type, &ufunc, &capsule)) { + return NULL; + } + PyUFuncGenericFunction oldfunc = (PyUFuncGenericFunction) + PyCapsule_GetPointer(capsule, "oldfunc"); + if (oldfunc == NULL) { + return NULL; + } + int signature[2] = {NPY_DOUBLE, NPY_DOUBLE}; + PyUFunc_ReplaceLoopBySignature(ufunc, oldfunc, signature, NULL); + Py_RETURN_NONE; +} + static PyMethodDef UMath_TestsMethods[] = { {"test_signature", UMath_Tests_test_signature, METH_VARARGS, "Test signature parsing of ufunc. \n" @@ -865,6 +931,12 @@ static PyMethodDef UMath_TestsMethods[] = { "internals. 
\n", }, {"test_dispatch", UMath_Tests_test_dispatch, METH_NOARGS, NULL}, + {"replace_loop", UMath_Tests_replace_loop, METH_VARARGS, + "Replace the float64 loop of a ufunc with one that outputs 42.0.\n" + "Returns a capsule holding the old loop for restore_loop().\n"}, + {"restore_loop", UMath_Tests_restore_loop, METH_VARARGS, + "Restore a ufunc loop previously replaced by replace_loop().\n" + "Arguments: ufunc, capsule\n"}, {NULL, NULL, 0, NULL} /* Sentinel */ }; diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index db5698d8a819..340c0bd7f8e2 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -6,8 +6,8 @@ * * - operand_DTypes: The datatypes as passed in by the user. * - signature: The DTypes fixed by the user with `dtype=` or `signature=`. - * - ufunc._loops: A list of all ArrayMethods and promoters, it contains - * tuples `(dtypes, ArrayMethod)` or `(dtypes, promoter)`. + * - ufunc._loops: Ordered dict of all ArrayMethods and promoters, mapping + * `dtypes` to tuples `(dtypes, ArrayMethod)` or `(dtypes, promoter)`. * - ufunc._dispatch_cache: A cache to store previous promotion and/or * dispatching results. * - The actual arrays are used to support the old code paths where necessary. @@ -42,9 +42,6 @@ #include #include -#include -#include - #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" #include "npy_import.h" @@ -73,8 +70,8 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, /** - * Function to add a new loop to the ufunc. This mainly appends it to the - * list (as it currently is just a list). + * Function to add a new loop to the ufunc. This adds it to the + * _loops dict keyed by the DType tuple. * * @param ufunc The universal function to add the loop to. * @param info The tuple (dtype_tuple, ArrayMethod/promoter). 
@@ -117,38 +114,16 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate) return -1; } - if (ufunc->_loops == NULL) { - ufunc->_loops = PyList_New(0); - if (ufunc->_loops == NULL) { - return -1; - } + int found = PyDict_SetDefaultRef(ufunc->_loops, DType_tuple, info, NULL); + if (found < 0) { + return -1; } - - PyObject *loops = ufunc->_loops; - Py_ssize_t length = PyList_Size(loops); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItemRef(loops, i); - PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); - Py_DECREF(item); - int cmp = PyObject_RichCompareBool(cur_DType_tuple, DType_tuple, Py_EQ); - if (cmp < 0) { - return -1; - } - if (cmp == 0) { - continue; - } - if (ignore_duplicate) { - return 0; - } + if (found && !ignore_duplicate) { PyErr_Format(PyExc_TypeError, "A loop/promoter has already been registered with '%s' for %R", ufunc_get_name_cstr(ufunc), DType_tuple); return -1; } - - if (PyList_Append(loops, info) < 0) { - return -1; - } return 0; } @@ -182,6 +157,7 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) PyObject *dtypes = PyArray_TupleFromItems( nargs, (PyObject **)bmeth->dtypes, 1); if (dtypes == NULL) { + Py_DECREF(bmeth); return -1; } PyObject *info = PyTuple_Pack(2, dtypes, bmeth->method); @@ -190,7 +166,36 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) if (info == NULL) { return -1; } - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; +} + + + +/* + * There are a few ArrayMethods we store on the DType since they are not + * (only) used via ufuncs. Some don't need to be bound (DTypes attached) + * and are currently not. 
+ */ +template +static int +set_static_method(PyArrayMethod_Spec *spec) { + PyArray_DTypeMeta *dtype = spec->dtypes[0]; + PyBoundArrayMethodObject *meth = PyArrayMethod_FromSpec_int(spec, 0); + if (meth == NULL) { + return -1; + } + + if constexpr (!bound) { + Py_INCREF(meth->method); + NPY_DT_SLOTS(dtype)->*slot = meth->method; + Py_DECREF(meth); + } + else { + NPY_DT_SLOTS(dtype)->*slot = meth; + } + return 0; } @@ -202,64 +207,67 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) { - if (npy_cache_import_runtime( - "numpy", "sort", &npy_runtime_imports.sort) < 0) { - return -1; - } - if (npy_cache_import_runtime( - "numpy", "argsort", &npy_runtime_imports.argsort) < 0) { - return -1; - } + int ret = -1; + PyObject *ufunc = NULL; PyUFunc_LoopSlot *slot; for (slot = slots; slot->name != NULL; slot++) { - PyObject *ufunc = npy_import_entry_point(slot->name); - if (ufunc == NULL) { - return -1; + // Hardcode slot names for attributes and non-ufuncs stored on the DType + // (Also avoids circular imports a bit.) 
+ if (strcmp(slot->name, "real") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.real)); } - - if (ufunc == npy_runtime_imports.sort) { - Py_DECREF(ufunc); - - PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; - PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); - if (sort_meth == NULL) { - return -1; + else if (strcmp(slot->name, "imag") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.imag)); + } + else if (strcmp(slot->name, "sort") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.sort)); + } + else if (strcmp(slot->name, "argsort") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.argsort)); + } + else { + Py_XSETREF(ufunc, npy_import_entry_point(slot->name)); + if (ufunc == NULL) { + goto finish; } - - NPY_DT_SLOTS(dtype)->sort_meth = sort_meth->method; - Py_INCREF(sort_meth->method); - Py_DECREF(sort_meth); } - else if (ufunc == npy_runtime_imports.argsort) { - Py_DECREF(ufunc); - PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; - PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); - if (argsort_meth == NULL) { - return -1; + if (ufunc == npy_interned_str.real) { + if (set_static_method<&NPY_DType_Slots::real_meth, true>(slot->spec) < 0) { + goto finish; + } + } + else if (ufunc == npy_interned_str.imag) { + if (set_static_method<&NPY_DType_Slots::imag_meth, true>(slot->spec) < 0) { + goto finish; + } + } + else if (ufunc == npy_interned_str.sort) { + if (set_static_method<&NPY_DType_Slots::sort_meth, false>(slot->spec) < 0) { + goto finish; + } + } + else if (ufunc == npy_interned_str.argsort) { + if (set_static_method<&NPY_DType_Slots::argsort_meth, false>(slot->spec) < 0) { + goto finish; } - - NPY_DT_SLOTS(dtype)->argsort_meth = argsort_meth->method; - Py_INCREF(argsort_meth->method); - Py_DECREF(argsort_meth); } else { if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { PyErr_Format(PyExc_TypeError, "%s was not a ufunc!", slot->name); - Py_DECREF(ufunc); - return -1; + 
goto finish; } - - int ret = PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0); - Py_DECREF(ufunc); - if (ret < 0) { - return -1; + if (PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0) < 0) { + goto finish; } } } - return 0; + ret = 0; + finish: + Py_XDECREF(ufunc); + return ret; } @@ -302,7 +310,13 @@ resolve_implementation_info(PyUFuncObject *ufunc, PyObject **out_info) { int nin = ufunc->nin, nargs = ufunc->nargs; - Py_ssize_t size = PySequence_Length(ufunc->_loops); + int ret = -1; + /* PyDict_Values returns a snapshot, safe against concurrent additions. */ + PyObject *loops = PyDict_Values(ufunc->_loops); + if (loops == NULL) { + return -1; + } + Py_ssize_t size = PySequence_Length(loops); PyObject *best_dtypes = NULL; PyObject *best_resolver_info = NULL; @@ -319,8 +333,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, for (Py_ssize_t res_idx = 0; res_idx < size; res_idx++) { /* Test all resolvers */ - PyObject *resolver_info = PySequence_Fast_GET_ITEM( - ufunc->_loops, res_idx); + PyObject *resolver_info = PySequence_Fast_GET_ITEM(loops, res_idx); if (only_promoters && PyObject_TypeCheck( PyTuple_GET_ITEM(resolver_info, 1), &PyArrayMethod_Type)) { @@ -381,7 +394,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, int subclass = PyObject_IsSubclass( (PyObject *)given_dtype, (PyObject *)resolver_dtype); if (subclass < 0) { - return -1; + goto finish; } if (!subclass) { matches = NPY_FALSE; @@ -479,7 +492,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, "a better match is not yet implemented. This " "will pick the better (or bail) in the future."); *out_info = NULL; - return -1; + goto finish; } if (best == -1) { @@ -511,8 +524,9 @@ resolve_implementation_info(PyUFuncObject *ufunc, * We just redo it anyway for simplicity.) 
*/ if (!only_promoters) { - return resolve_implementation_info(ufunc, - op_dtypes, NPY_TRUE, out_info); + ret = resolve_implementation_info( + ufunc, op_dtypes, NPY_TRUE, out_info); + goto finish; } /* * If this is already the retry, we are out of luck. Promoters @@ -534,7 +548,8 @@ resolve_implementation_info(PyUFuncObject *ufunc, Py_DECREF(given); } *out_info = NULL; - return 0; + ret = 0; + goto finish; } else if (current_best == 0) { /* The new match is not better, continue looking. */ @@ -548,11 +563,15 @@ resolve_implementation_info(PyUFuncObject *ufunc, if (best_dtypes == NULL) { /* The non-legacy lookup failed */ *out_info = NULL; - return 0; } + else { + *out_info = best_resolver_info; + } + ret = 0; - *out_info = best_resolver_info; - return 0; +finish: + Py_DECREF(loops); + return ret; } @@ -595,22 +614,21 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, return NULL; } /* - * If none of the dtypes changes, we would recurse infinitely, abort. - * (Of course it is nevertheless possible to recurse infinitely.) - * - * TODO: We could allow users to signal this directly and also move - * the call to be (almost immediate). That would call it - * unnecessarily sometimes, but may allow additional flexibility. - */ - int dtypes_changed = 0; - for (int i = 0; i < nargs; i++) { - if (new_op_dtypes[i] != op_dtypes[i]) { - dtypes_changed = 1; - break; + * If none of the dtypes changes, we would recurse infinitely, abort. + * (Of course it is nevertheless possible to recurse infinitely.) + * If the user indicates a `1` result, we trust the user. 
+ */ + if (promoter_result != 1) { + int dtypes_changed = 0; + for (int i = 0; i < nargs; i++) { + if (new_op_dtypes[i] != op_dtypes[i]) { + dtypes_changed = 1; + break; + } + } + if (!dtypes_changed) { + goto finish; } - } - if (!dtypes_changed) { - goto finish; } } else { @@ -787,7 +805,7 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc, /* - * Note, this function returns a BORROWED references to info since it adds + * Note, this function returns a BORROWED reference to info since it adds * it to the loops. */ NPY_NO_EXPORT PyObject * @@ -816,8 +834,11 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, Py_DECREF(info); return NULL; } - Py_DECREF(info); /* now borrowed from the ufunc's list of loops */ - return info; + /* Loop currently borrowed from the _loops (use original if not replaced) */ + PyObject *result = PyDict_GetItemWithError( // noqa: borrowed-ref OK + ufunc->_loops, PyTuple_GET_ITEM(info, 0)); + Py_DECREF(info); + return result; } @@ -826,6 +847,13 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, * to use for a ufunc. This function may recurse with `do_legacy_fallback` * set to False. * + * The result is cached in the ufunc's dispatch cache for faster lookup next time. + * It is possible that multiple threads call this function at the same time, and + * there is cache miss, in that case all threads will do the full resolution, however + * only one will store the result in the cache (the others get the stored result). + * This is ensured by `PyArrayIdentityHash_SetItemDefault` which only sets the item + * if it is not already set otherwise returning the existing value. + * * If value-based promotion is necessary, this is handled ahead of time by * `promote_and_get_ufuncimpl`. */ @@ -868,12 +896,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. 
*/ - if (PyArrayIdentityHash_SetItem( + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault( (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + (PyObject **)op_dtypes, info, &result) < 0) { return NULL; } - return info; + return result; } } @@ -891,12 +920,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } else if (info != NULL) { /* Add result to the cache using the original types: */ - if (PyArrayIdentityHash_SetItem( + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault( (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + (PyObject **)op_dtypes, info, &result) < 0) { return NULL; } - return info; + return result; } } @@ -958,52 +988,20 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, Py_XDECREF(new_op_dtypes[i]); } - /* Add this to the cache using the original types: */ - if (cacheable && PyArrayIdentityHash_SetItem( - (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (info == NULL) { return NULL; } - return info; -} - -#ifdef Py_GIL_DISABLED -/* - * Fast path for promote_and_get_info_and_ufuncimpl. 
- * Acquires a read lock to check for a cache hit and then - * only acquires a write lock on a cache miss to fill the cache - */ -static inline PyObject * -promote_and_get_info_and_ufuncimpl_with_locking( - PyUFuncObject *ufunc, - PyArrayObject *const ops[], - PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *op_dtypes[], - npy_bool legacy_promotion_is_possible) -{ - std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); - PyObject *info = PyArrayIdentityHash_GetItemWithLock( - (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes); - - if (info != NULL && PyObject_TypeCheck( - PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { - /* Found the ArrayMethod and NOT a promoter: return it */ - return info; + if (cacheable) { + PyObject *result = NULL; + /* Add this to the cache using the original types: */ + if (PyArrayIdentityHash_SetItemDefault((PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, &result) < 0) { + return NULL; + } + return result; } - - // cache miss, need to acquire a write lock and recursively calculate the - // correct dispatch resolution - NPY_BEGIN_ALLOW_THREADS - mutex->lock(); - NPY_END_ALLOW_THREADS - info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, legacy_promotion_is_possible); - mutex->unlock(); - return info; } -#endif /** * The central entry-point for the promotion and dispatching machinery. @@ -1093,20 +1091,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } - /* - * We hold the GIL here, so on the GIL-enabled build the GIL prevents - * races to fill the promotion cache. - * - * On the free-threaded build we need to set up our own locking to prevent - * races to fill the promotion cache. 
- */ -#ifdef Py_GIL_DISABLED - PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, - ops, signature, op_dtypes, legacy_promotion_is_possible); -#else PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); -#endif if (info == NULL) { goto handle_error; @@ -1357,8 +1343,9 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (info == NULL) { return -1; } - - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } /* @@ -1374,28 +1361,20 @@ get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, return NULL; } for (int i=0; i < ndtypes; i++) { - PyTuple_SetItem(t_dtypes, i, (PyObject *)op_dtype); - } - PyObject *loops = ufunc->_loops; - Py_ssize_t length = PyList_Size(loops); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItemRef(loops, i); - PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); - Py_DECREF(item); - int cmp = PyObject_RichCompareBool(cur_DType_tuple, - t_dtypes, Py_EQ); - if (cmp < 0) { - Py_DECREF(t_dtypes); - return NULL; - } - if (cmp == 0) { - continue; - } - /* Got the match */ + Py_INCREF(op_dtype); + PyTuple_SET_ITEM(t_dtypes, i, (PyObject *)op_dtype); + } + PyObject *info; + if (PyDict_GetItemRef(ufunc->_loops, t_dtypes, &info) < 0) { Py_DECREF(t_dtypes); - return PyTuple_GetItem(item, 1); + return NULL; } Py_DECREF(t_dtypes); + if (info != NULL) { + PyObject *result = PyTuple_GET_ITEM(info, 1); + Py_DECREF(info); + return result; + } Py_RETURN_NONE; } @@ -1432,5 +1411,7 @@ PyUFunc_AddPromoter( if (info == NULL) { return -1; } - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 91b0b4c62d30..cf3f517b4c6a 100644 --- 
a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -15,6 +15,7 @@ #include "numpy/ufuncobject.h" #include "common.h" +#include "npy_pycompat.h" #define UFUNC_ERR_IGNORE 0 @@ -145,6 +146,13 @@ init_extobj(void) if (npy_static_pydata.default_extobj_capsule == NULL) { return -1; } +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(npy_static_pydata.default_extobj_capsule) == 0) { + PyErr_SetString(PyExc_RuntimeError, "Could not mark extobj capsule as immortal"); + Py_CLEAR(npy_static_pydata.default_extobj_capsule); + return -1; + } +#endif npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); if (npy_static_pydata.npy_extobj_contextvar == NULL) { @@ -205,14 +213,13 @@ extobj_make_extobj(PyObject *NPY_UNUSED(mod), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("_seterrobj", args, len_args, kwnames, - "$all", &errmodeconverter, &all_mode, - "$divide", &errmodeconverter, ÷_mode, - "$over", &errmodeconverter, &over_mode, - "$under", &errmodeconverter, &under_mode, - "$invalid", &errmodeconverter, &invalid_mode, - "$bufsize", &PyArray_IntpFromPyIntConverter, &bufsize, - "$call", NULL, &pyfunc, - NULL, NULL, NULL) < 0) { + {"$all", &errmodeconverter, &all_mode}, + {"$divide", &errmodeconverter, ÷_mode}, + {"$over", &errmodeconverter, &over_mode}, + {"$under", &errmodeconverter, &under_mode}, + {"$invalid", &errmodeconverter, &invalid_mode}, + {"$bufsize", &PyArray_IntpFromPyIntConverter, &bufsize}, + {"$call", NULL, &pyfunc}) < 0) { return NULL; } diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 7a85937fcc8f..586d406b9ff9 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -367,7 +367,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, flags = _NPY_METH_FORCE_CAST_INPUTS; } - PyArrayMethod_GetReductionInitial *get_reduction_intial = NULL; + 
PyArrayMethod_GetReductionInitial *get_reduction_initial = NULL; if (ufunc->nin == 2 && ufunc->nout == 1) { npy_bool reorderable = NPY_FALSE; PyObject *identity_obj = PyUFunc_GetDefaultIdentity( @@ -385,7 +385,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, flags |= NPY_METH_IS_REORDERABLE; } if (identity_obj != Py_None) { - get_reduction_intial = &get_initial_from_ufunc; + get_reduction_initial = &get_initial_from_ufunc; } } for (int i = 0; i < ufunc->nin+ufunc->nout; i++) { @@ -398,10 +398,22 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, } } + /* + * Set NPY_METH_NO_FLOATINGPOINT_ERRORS for non-object loops of ufuncs + * that are known to never raise floating point errors (e.g. comparisons, + * logical operations, abs, neg, copysign, etc.). + * The flag is set on the ufunc via _ufunc_flags during initialization in + * __umath_generated.c (driven by no_float_errors=True in generate_umath.py). + */ + if ((ufunc->_ufunc_flags & UFUNC_NO_FLOATINGPOINT_ERRORS) && + !(flags & NPY_METH_REQUIRES_PYAPI)) { + flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + } + PyType_Slot slots[4] = { {NPY_METH_get_loop, &get_wrapped_legacy_ufunc_loop}, {NPY_METH_resolve_descriptors, &simple_legacy_resolve_descriptors}, - {NPY_METH_get_reduction_initial, get_reduction_intial}, + {NPY_METH_get_reduction_initial, get_reduction_initial}, {0, NULL}, }; if (any_output_flexible) { diff --git a/numpy/_core/src/umath/loops.c.src b/numpy/_core/src/umath/loops.c.src index 3928d2a0d0c4..1ad9cab4666e 100644 --- a/numpy/_core/src/umath/loops.c.src +++ b/numpy/_core/src/umath/loops.c.src @@ -688,7 +688,12 @@ TIMEDELTA_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, v { UNARY_LOOP { const npy_timedelta in1 = *(npy_timedelta *)ip1; - *((npy_timedelta *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); + if (in1 == NPY_DATETIME_NAT) { + *((npy_double *)op1) = NPY_NAN; + } + else { + *((npy_double *)op1) = in1 > 0 ? 1.0 : (in1 < 0 ? 
-1.0 : 0.0); + } } } diff --git a/numpy/_core/src/umath/loops_half.dispatch.c.src b/numpy/_core/src/umath/loops_half.dispatch.c.src index a81a64ed0294..2b17fd622c4b 100644 --- a/numpy/_core/src/umath/loops_half.dispatch.c.src +++ b/numpy/_core/src/umath/loops_half.dispatch.c.src @@ -12,7 +12,7 @@ #define NPY__SVML_IS_ENABLED 0 #endif -#if NPY__SVML_IS_ENABLED && !defined(NPY_HAVE_AVX512_SPR) +#if NPY__SVML_IS_ENABLED typedef __m256i npyvh_f16; #define npyv_cvt_f16_f32 _mm512_cvtph_ps @@ -80,11 +80,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(HALF_@func@) if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && (steps[0] == sizeof(npy_half)) && (steps[1] == sizeof(npy_half))) { - #ifdef NPY_HAVE_AVX512_SPR - __svml_@intrin@s32(src, dst, len); - #else avx512_@intrin@_f16(src, dst, len); - #endif return; } #endif // NPY__SVML_IS_ENABLED diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 11e014acec7f..fb82a50a13de 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -517,7 +517,7 @@ NPY_NO_EXPORT void void *ip1=args[0], *ip2=args[1], *op=args[2]; #if @USEBLAS@ && defined(HAVE_CBLAS) /* - * TODO: refactor this out to a inner_loop_selector, in + * TODO: refactor this out to an inner_loop_selector, in * PyUFunc_MatmulLoopSelector. But that call does not have access to * n, m, p and strides. 
*/ @@ -714,6 +714,15 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp npy_intp i; PyObject *result = NULL; + if (n == 0) { + PyObject *zero = PyLong_FromLong(0); + if (zero == NULL) { + return; + } + Py_XSETREF(*((PyObject **)op), zero); + return; + } + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { PyObject *obj1 = *(PyObject**)ip1, *obj2 = *(PyObject**)ip2; if (obj1 == NULL) { diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 139d9c7bdbbd..421359eb6203 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -342,7 +342,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, } /* * Set override arguments for each call since the tuple must - * not be mutated after use in PyPy + * not be mutated after use * We increase all references since SET_ITEM steals * them and they will be DECREF'd when the tuple is deleted. */ diff --git a/numpy/_core/src/umath/real_imag_ufuncs.cpp b/numpy/_core/src/umath/real_imag_ufuncs.cpp new file mode 100644 index 000000000000..f30c16460709 --- /dev/null +++ b/numpy/_core/src/umath/real_imag_ufuncs.cpp @@ -0,0 +1,297 @@ +/* + * This file implements the real and imag ufuncs which are in turn used + * for the `imag` and `real` attributes of arrays. + * The ArrayMethods are primarily stored on the DType for `real` and `imag` + * while the ufunc uses a promoter to access these dynamically. 
+ */ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE + +#include +#include "npy_pycompat.h" // PyObject_GetOptionalAttr +#include "numpy/ndarraytypes.h" +#include "numpy/ufuncobject.h" +#include "dispatching.h" + +#include "numpyos.h" +#include "dtypemeta.h" +#include "dtype_transfer.h" +#include "lowlevel_strided_loops.h" +#include "array_method.h" + +#include "real_imag_ufuncs.h" + + +template +static NPY_CASTING +complex_to_real_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + Py_INCREF(dtypes[1]->singleton); + loop_descrs[1] = dtypes[1]->singleton; + + if (PyDataType_ISBYTESWAPPED(loop_descrs[0])) { + Py_SETREF( + loop_descrs[1], PyArray_DescrNewByteorder(loop_descrs[1], NPY_SWAP)); + if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + } + if constexpr (real_part) { + *view_offset = 0; + } + else { + *view_offset = loop_descrs[1]->elsize; + } + return NPY_NO_CASTING; +} + + +/* We shouldn't normally use it, but define a simple loop anyway. 
*/ +template +static int extract_complex_part_loop( + PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) +{ + npy_intp N = dimensions[0]; + char *in = data[0]; + char *out = data[1]; + npy_intp istride = strides[0]; + npy_intp ostride = strides[1]; + + if constexpr (!real_part) { + in += sizeof(real_type); + } + + while (N--) { + real_type value = *reinterpret_cast(in); + *reinterpret_cast(out) = value; + in += istride; + out += ostride; + } + return 0; +} + + +template +static int +object_get_comp_strided_loop( + PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *auxdata) +{ + npy_intp N = dimensions[0]; + char *in = data[0]; + char *out = data[1]; + npy_intp istride = strides[0]; + npy_intp ostride = strides[1]; + + while (N--) { + PyObject *obj = *reinterpret_cast(in); + PyObject *attr; + if (PyObject_GetOptionalAttr(obj, npy_interned_str.*component, &attr) < 0) { + return -1; + } + if (attr == NULL) { + if constexpr (component == &npy_interned_str_struct::real) { + attr = Py_NewRef(obj); // just use the old object... 
+ } + else { + // Use long zero as a best bet (also historical value) + attr = PyLong_FromLong(0); + if (attr == NULL) { + return -1; + } + } + } + + Py_XSETREF((*reinterpret_cast(out)), attr); + in += istride; + out += ostride; + } + return 0; +} + + +template +static int +register_one_for_type( + const char *name, PyArray_DTypeMeta *complex_dtype, PyArray_DTypeMeta *real_dtype) +{ + PyArray_DTypeMeta *dtypes[2] = {complex_dtype, real_dtype}; + PyType_Slot meth_slots[] = { + {NPY_METH_resolve_descriptors, (void *)&complex_to_real_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&extract_complex_part_loop}, + {0, NULL} + }; + PyArrayMethod_Spec meth_spec; + meth_spec.name = "generic_real_imag_loop"; + meth_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + meth_spec.nin = 1; + meth_spec.nout = 1; + meth_spec.dtypes = dtypes; + meth_spec.slots = meth_slots; + meth_spec.casting = NPY_NO_CASTING; + + PyUFunc_LoopSlot slots[] = { + {name, &meth_spec}, + {0, nullptr} + }; + return PyUFunc_AddLoopsFromSpecs(slots); +} + + +template +static int +register_both_for_type(PyArray_DTypeMeta *complex_dtype, PyArray_DTypeMeta *real_dtype) { + if (register_one_for_type("real", complex_dtype, real_dtype) < 0) { + return -1; + } + if (register_one_for_type("imag", complex_dtype, real_dtype) < 0) { + return -1; + } + return 0; +} + + +template +static int +register_one_object_loop(const char *name) +{ + PyArray_DTypeMeta *dtypes[2] = {&PyArray_ObjectDType, &PyArray_ObjectDType}; + PyType_Slot meth_slots[] = { + {NPY_METH_strided_loop, (void *)&object_get_comp_strided_loop}, + {0, nullptr} + }; + PyArrayMethod_Spec meth_spec; + meth_spec.name = "object_real_imag_loop"; + meth_spec.flags = (NPY_ARRAYMETHOD_FLAGS)( + NPY_METH_NO_FLOATINGPOINT_ERRORS|NPY_METH_REQUIRES_PYAPI); + meth_spec.nin = 1; + meth_spec.nout = 1; + meth_spec.dtypes = dtypes; + meth_spec.slots = meth_slots; + meth_spec.casting = NPY_NO_CASTING; + PyUFunc_LoopSlot slots[] = { + {name, &meth_spec}, + {0, nullptr} 
+ }; + return PyUFunc_AddLoopsFromSpecs(slots); +} + + +template +static int +real_imag_promoter(PyObject *ufunc, + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(op_dtypes[0])->*slot; + if (meth == NULL) { + return -1; // nothing to do. + } + if (signature[1] != NULL && signature[1] != meth->dtypes[1]) { + // out signature requested, but not compatible (may be unreachable). + return -1; + } + + /* + * Dynamically add the loop to the ufunc, since it seem it was missing. + */ + PyObject *DType_tuple = PyTuple_FromArray((PyObject **)meth->dtypes, 2); + if (DType_tuple == NULL) { + return -1; + } + PyObject *info = PyTuple_Pack(2, DType_tuple, meth->method); + Py_DECREF(DType_tuple); + if (info == NULL) { + return -1; + } + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 1); + Py_DECREF(info); + if (res < 0) { + return -1; + } + new_op_dtypes[0] = NPY_DT_NewRef(meth->dtypes[0]); + new_op_dtypes[1] = NPY_DT_NewRef(meth->dtypes[1]); + return 1; +} + + + +template +static int +add_promoter_for_slot(PyObject *ufunc) +{ + PyObject *promoter = PyCapsule_New( + (void *)real_imag_promoter, "numpy._ufunc_promoter", NULL); + if (promoter == NULL) { + return -1; + } + PyObject *dtypes[2] = {(PyObject *)&PyArrayDescr_Type, (PyObject *)&PyArrayDescr_Type}; + PyObject *info = PyTuple_FromArray(dtypes, 2); + if (info == NULL) { + Py_DECREF(promoter); + return -1; + } + int res = PyUFunc_AddPromoter(ufunc, info, promoter); + Py_DECREF(info); + Py_DECREF(promoter); + return res; +} + + +NPY_NO_EXPORT int +init_real_imag_ufuncs(PyObject *umath) +{ + int res = -1; + PyObject *real_ufunc = PyObject_GetAttr(umath, npy_interned_str.real); + PyObject *imag_ufunc = PyObject_GetAttr(umath, npy_interned_str.imag); + if (real_ufunc == NULL || imag_ufunc == NULL) { + goto finish; + } + + if (register_both_for_type(&PyArray_CFloatDType, &PyArray_FloatDType) < 0) { + goto finish; + } + 
if (register_both_for_type(&PyArray_CDoubleDType, &PyArray_DoubleDType) < 0) { + goto finish; + } + if (register_both_for_type(&PyArray_CLongDoubleDType, &PyArray_LongDoubleDType) < 0) { + goto finish; + } + if (register_one_object_loop<&npy_interned_str_struct::real>("real") < 0) { + goto finish; + } + if (register_one_object_loop<&npy_interned_str_struct::imag>("imag") < 0) { + goto finish; + } + + /* + * The above actually only adds the method to the DType itself. We deal with + * the ufunc by adding a general fall-back method that dynamically registers + * loops based on the above DType method slots. + */ + if (add_promoter_for_slot<&NPY_DType_Slots::real_meth>(real_ufunc) < 0) { + goto finish; + } + if (add_promoter_for_slot<&NPY_DType_Slots::imag_meth>(imag_ufunc) < 0) { + goto finish; + } + res = 0; + finish: + Py_XDECREF(real_ufunc); + Py_XDECREF(imag_ufunc); + + return res; +} diff --git a/numpy/_core/src/umath/real_imag_ufuncs.h b/numpy/_core/src/umath/real_imag_ufuncs.h new file mode 100644 index 000000000000..4e6d569a92cc --- /dev/null +++ b/numpy/_core/src/umath/real_imag_ufuncs.h @@ -0,0 +1,15 @@ +#ifndef _NPY_CORE_SRC_UMATH_REAL_IMAG_UFUNCS_H_ +#define _NPY_CORE_SRC_UMATH_REAL_IMAG_UFUNCS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +init_real_imag_ufuncs(PyObject *umath); + +#ifdef __cplusplus +} +#endif + +#endif /* _NPY_CORE_SRC_UMATH_REAL_IMAG_UFUNCS_H_ */ diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 384ac052b226..321a1f841853 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -165,7 +165,7 @@ PyArray_CopyInitialReduceValues( * funcname : The name of the reduction function, for error messages. 
* errormask : forwarded from _get_bufsize_errmask * - * TODO FIXME: if you squint, this is essentially an second independent + * TODO FIXME: if you squint, this is essentially a second independent * implementation of generalized ufuncs with signature (i)->(), plus a few * extra bells and whistles. (Indeed, as far as I can tell, it was originally * split out to support a fancy version of count_nonzero... which is not diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index e2d7c22f5deb..c3c1416d5618 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -700,7 +700,7 @@ static inline int * - The reverse should work, so we return NotImplemented to defer. * (If self is a subclass, this will end up in the "unknown" path.) * - Neither works (e.g. `uint8 + int8`): We currently use the array path. - * - The other object is a unknown. It could be either a scalar, an array, + * - The other object is an unknown. It could be either a scalar, an array, * or an array-like (including a list!). Because NumPy scalars pretend to be * arrays we fall into the array fallback path here _normally_ (through * the generic scalar path). diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 06babeeda0a8..a2efbffa05a1 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -106,7 +106,7 @@ get_min_max(int typenum, long long *min, unsigned long long *max) /* * Determine if a Python long is within the typenums range, smaller, or larger. - * + * * Function returns -1 for errors. 
*/ static inline int @@ -141,7 +141,7 @@ get_value_range(PyObject *value, int type_num, int *range) } else { /* - * If we are checking for unisgned long long, the value may be larger + * If we are checking for unsigned long long, the value may be larger * then long long, but within range of unsigned long long. Check this * by doing the normal Python integer comparison. */ @@ -344,7 +344,7 @@ add_dtype_loops(PyObject *umath, PyArrayMethod_Spec *spec, PyObject *info) goto fail; } - /* + /* * NOTE: Iterates all type numbers, it would be nice to reduce this. * (that would be easier if we consolidate int DTypes in general.) */ diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 1e7bea49a365..cb674484a582 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -14,9 +14,6 @@ #include "string_fastsearch.h" #include "gil_utils.h" -#define CHECK_OVERFLOW(index) if (buf + (index) >= after) return 0 -#define MSB(val) ((val) >> 7 & 1) - #ifdef _MSC_VER // MSVC sometimes complains (C4715: "not all control paths return a value") // on switch statements over enum classes, even though all enum values are covered. diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 9b3d86c25301..18ed4534ea04 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -176,9 +176,14 @@ string_multiply(Buffer buf1, npy_int64 reps, Buffer out) return 0; } + size_t width = out.buffer_width(); + // we know this is positive + size_t reps_ = (size_t)reps; + if (len1 == 1) { - out.buffer_memset(*buf1, reps); - out.buffer_fill_with_zeros_after_index(reps); + size_t end_index = reps_ > width ? 
width : reps_; + out.buffer_memset(*buf1, end_index); + out.buffer_fill_with_zeros_after_index(end_index); return 0; } @@ -188,7 +193,6 @@ string_multiply(Buffer buf1, npy_int64 reps, Buffer out) } size_t pad = 0; - size_t width = out.buffer_width(); if (width < newlen) { reps = width / len1; pad = width % len1; @@ -1125,9 +1129,9 @@ string_partition_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), - PyArray_Descr *const given_descrs[3], - PyArray_Descr *loop_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[6]), + PyArray_Descr *const given_descrs[6], + PyArray_Descr *loop_descrs[6], npy_intp *NPY_UNUSED(view_offset)) { if (!given_descrs[3] || !given_descrs[4] || !given_descrs[5]) { diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ebc10586bf8b..169c789a5651 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1119,6 +1119,11 @@ string_lrstrip_chars_strided_loop( } { char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in %s", ufunc_name); + goto fail; + } Buffer buf1((char *)s1.buf, s1.size); Buffer buf2((char *)s2.buf, s2.size); Buffer outbuf(new_buf, s1.size); @@ -1241,6 +1246,11 @@ string_lrstrip_whitespace_strided_loop( } { char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in %s", ufunc_name); + goto fail; + } Buffer buf((char *)s.buf, s.size); Buffer outbuf(new_buf, s.size); size_t new_buf_size = string_lrstrip_whitespace( @@ -1249,6 +1259,7 @@ string_lrstrip_whitespace_strided_loop( if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", ufunc_name); + 
PyMem_RawFree(new_buf); goto fail; } @@ -1444,18 +1455,30 @@ string_replace_strided_loop( Buffer buf3((char *)i3s.buf, i3s.size); // conservatively overallocate - // TODO check overflow - size_t max_size; + size_t num_repl, growth; if (i2s.size == 0) { // interleaving - max_size = i1s.size + (i1s.size + 1)*(i3s.size); + num_repl = (size_t)i1s.size + 1; + growth = i3s.size; } else { // replace i2 with i3 - size_t change = i2s.size >= i3s.size ? 0 : i3s.size - i2s.size; - max_size = i1s.size + count * change; + num_repl = (size_t)count; + growth = i2s.size >= i3s.size ? 0 : i3s.size - i2s.size; + } + char *new_buf = NULL; + size_t max_size; + if (!npy_mul_with_overflow_size_t(&max_size, num_repl, growth)) { + max_size += i1s.size; + if (max_size >= i1s.size) { + new_buf = (char *)PyMem_RawCalloc(max_size, 1); + } + } + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in replace"); + goto fail; } - char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); Buffer outbuf(new_buf, max_size); size_t new_buf_size = string_replace( @@ -1463,6 +1486,7 @@ string_replace_strided_loop( if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); + PyMem_RawFree(new_buf); goto fail; } @@ -1575,6 +1599,11 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, } char *new_buf = (char *)PyMem_RawCalloc(new_buf_size, 1); + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in expandtabs"); + goto fail; + } Buffer outbuf(new_buf, new_buf_size); string_expandtabs(buf, tabsize, outbuf); @@ -1582,6 +1611,7 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error( PyExc_MemoryError, "Failed to pack string in expandtabs"); + PyMem_RawFree(new_buf); goto fail; } @@ -1928,9 +1958,9 @@ zfill_strided_loop(PyArrayMethod_Context *context, static 
NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), - PyArray_Descr *const given_descrs[3], - PyArray_Descr *loop_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], + PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[2] || given_descrs[3] || given_descrs[4]) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 63ac438eabc4..7bc524bd6040 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -142,21 +142,16 @@ PyUFunc_clearfperr() NPY_ITER_NO_SUBTYPE | \ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE -/* Called at module initialization to set the matmul ufunc output flags */ +/* Called at module initialization to set the matmul family gufunc output flags */ NPY_NO_EXPORT int set_matmul_flags(PyObject *d) { - PyObject *matmul = NULL; - int result = PyDict_GetItemStringRef(d, "matmul", &matmul); - if (result <= 0) { - // caller sets an error if one isn't already set - return -1; - } /* * The default output flag NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE allows * perfectly overlapping input and output (in-place operations). While * correct for the common mathematical operations, this assumption is - * incorrect in the general case and specifically in the case of matmul. + * incorrect in the general case and specifically in the case of matmul, + * matvec, and vecmat. * * NPY_ITER_UPDATEIFCOPY is added by default in * PyUFunc_GeneralizedFunction, which is the variant called for gufuncs @@ -164,11 +159,22 @@ set_matmul_flags(PyObject *d) * * Enabling NPY_ITER_WRITEONLY can prevent a copy in some cases. 
*/ - ((PyUFuncObject *)matmul)->op_flags[2] = (NPY_ITER_WRITEONLY | - NPY_ITER_UPDATEIFCOPY | - NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) & - ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; - Py_DECREF(matmul); + npy_uint32 flags = (NPY_ITER_WRITEONLY | + NPY_ITER_UPDATEIFCOPY | + NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) & + ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; + + const char *names[] = {"matmul", "matvec", "vecmat"}; + for (int i = 0; i < 3; i++) { + PyObject *ufunc = NULL; + int result = PyDict_GetItemStringRef(d, names[i], &ufunc); + if (result <= 0) { + // caller sets an error if one isn't already set + return -1; + } + ((PyUFuncObject *)ufunc)->op_flags[2] = flags; + Py_DECREF(ufunc); + } return 0; } @@ -321,9 +327,11 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature) } len = strlen(signature); ufunc->core_signature = PyArray_malloc(sizeof(char) * (len+1)); - if (ufunc->core_signature) { - strcpy(ufunc->core_signature, signature); + if (ufunc->core_signature == NULL) { + PyErr_NoMemory(); + return -1; } + strcpy(ufunc->core_signature, signature); /* Allocate sufficient memory to store pointers to all dimension names */ var_names = PyArray_malloc(sizeof(char const*) * len); if (var_names == NULL) { @@ -473,14 +481,28 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature) parse_error = "incomplete signature: not all arguments found"; goto fail; } - ufunc->core_dim_ixs = PyArray_realloc(ufunc->core_dim_ixs, + void *tmp; + tmp = PyArray_realloc(ufunc->core_dim_ixs, sizeof(int) * cur_core_dim); - ufunc->core_dim_sizes = PyArray_realloc( - ufunc->core_dim_sizes, + if (tmp == NULL) { + PyErr_NoMemory(); + goto fail; + } + ufunc->core_dim_ixs = tmp; + tmp = PyArray_realloc(ufunc->core_dim_sizes, sizeof(npy_intp) * ufunc->core_num_dim_ix); - ufunc->core_dim_flags = PyArray_realloc( - ufunc->core_dim_flags, + if (tmp == NULL) { + PyErr_NoMemory(); + goto fail; + } + ufunc->core_dim_sizes = tmp; + tmp = PyArray_realloc(ufunc->core_dim_flags, sizeof(npy_uint32) * 
ufunc->core_num_dim_ix); + if (tmp == NULL) { + PyErr_NoMemory(); + goto fail; + } + ufunc->core_dim_flags = tmp; /* check for trivial core-signature, e.g. "(),()->()" */ if (cur_core_dim == 0) { @@ -1384,7 +1406,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, Py_INCREF(op_axes_tuple); } else if (op_ncore == 1) { - op_axes_tuple = PyTuple_Pack(1, op_axes_tuple); + op_axes_tuple = PyTuple_FromArray(&op_axes_tuple, 1); if (op_axes_tuple == NULL) { return -1; } @@ -2738,8 +2760,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, else { fixed_strides[0] = PyArray_STRIDES(op[0])[axis]; fixed_strides[1] = PyArray_STRIDES(op[1])[axis]; - fixed_strides[2] = fixed_strides[0]; } + // First argument is also passed as output (e.g. see dataptr below). + fixed_strides[2] = fixed_strides[0]; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -3411,7 +3434,7 @@ _set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) return 0; } /* Can be an array if it only has one output */ - full_args->out = PyTuple_Pack(1, out_obj); + full_args->out = PyTuple_FromArray(&out_obj, 1); if (full_args->out == NULL) { return -1; } @@ -3426,6 +3449,72 @@ _set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) return 0; } +static inline int +/* Convert the 'axis' parameter into a list of axes */ +_parse_axis(PyObject *axes_obj, int ndim, int *axes) +{ + int naxes = 0; + if (axes_obj == NULL) { + /* apply defaults */ + if (ndim == 0) { + naxes = 0; + } + else { + naxes = 1; + axes[0] = 0; + } + } + else if (axes_obj == Py_None) { + /* Convert 'None' into all the axes */ + naxes = ndim; + for (int i = 0; i < naxes; ++i) { + axes[i] = i; + } + } + else if (PyTuple_Check(axes_obj)) { + naxes = PyTuple_Size(axes_obj); + if (naxes < 0 || naxes > NPY_MAXDIMS) { + PyErr_SetString(PyExc_ValueError, + "too many values for 'axis'"); + return -1; + } + for (int i = 0; i < naxes; ++i) { + PyObject *tmp = 
PyTuple_GET_ITEM(axes_obj, i); + int axis = PyArray_PyIntAsInt(tmp); + if (error_converting(axis)) { + return -1; + } + if (check_and_adjust_axis(&axis, ndim) < 0) { + return -1; + } + axes[i] = (int)axis; + } + } + else { + /* Try to interpret axis as an integer */ + int axis = PyArray_PyIntAsInt(axes_obj); + /* TODO: PyNumber_Index would be good to use here */ + if (error_converting(axis)) { + return -1; + } + /* + * As a special case for backwards compatibility in 'sum', + * 'prod', et al, also allow a reduction for scalars even + * though this is technically incorrect. + */ + if (ndim == 0 && (axis == 0 || axis == -1)) { + naxes = 0; + } + else if (check_and_adjust_axis(&axis, ndim) < 0) { + return -1; + } + else { + axes[0] = (int)axis; + naxes = 1; + } + } + return naxes; +} /* forward declaration */ static PyArray_DTypeMeta * _get_dtype(PyObject *dtype_obj); @@ -3439,7 +3528,7 @@ static PyObject * PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, int operation) { - int i, naxes=0, ndim; + int ndim; int axes[NPY_MAXDIMS]; ufunc_full_args full_args = {NULL, NULL}; @@ -3492,16 +3581,16 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("reduceat", args, len_args, kwnames, - "array", NULL, &op, - "indices", NULL, &indices_obj, - "|axis", NULL, &axes_obj, - "|dtype", NULL, &otype_obj, - "|out", NULL, &out_obj, - NULL, NULL, NULL) < 0) { + {"array", NULL, &op}, + {"indices", NULL, &indices_obj}, + {"|axis", NULL, &axes_obj}, + {"|dtype", NULL, &otype_obj}, + {"|out", NULL, &out_obj}) < 0) { goto fail; } /* Prepare inputs for PyUfunc_CheckOverride */ - full_args.in = PyTuple_Pack(2, op, indices_obj); + PyObject *reduce_in[] = {op, indices_obj}; + full_args.in = PyTuple_FromArray(reduce_in, 2); if (full_args.in == NULL) { goto fail; } @@ -3511,15 +3600,14 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if 
(npy_parse_arguments("accumulate", args, len_args, kwnames, - "array", NULL, &op, - "|axis", NULL, &axes_obj, - "|dtype", NULL, &otype_obj, - "|out", NULL, &out_obj, - NULL, NULL, NULL) < 0) { + {"array", NULL, &op}, + {"|axis", NULL, &axes_obj}, + {"|dtype", NULL, &otype_obj}, + {"|out", NULL, &out_obj}) < 0) { goto fail; } /* Prepare input for PyUfunc_CheckOverride */ - full_args.in = PyTuple_Pack(1, op); + full_args.in = PyTuple_FromArray(&op, 1); if (full_args.in == NULL) { goto fail; } @@ -3529,18 +3617,17 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("reduce", args, len_args, kwnames, - "array", NULL, &op, - "|axis", NULL, &axes_obj, - "|dtype", NULL, &otype_obj, - "|out", NULL, &out_obj, - "|keepdims", NULL, &keepdims_obj, - "|initial", &_not_NoValue, &initial, - "|where", NULL, &wheremask_obj, - NULL, NULL, NULL) < 0) { + {"array", NULL, &op}, + {"|axis", NULL, &axes_obj}, + {"|dtype", NULL, &otype_obj}, + {"|out", NULL, &out_obj}, + {"|keepdims", NULL, &keepdims_obj}, + {"|initial", &_not_NoValue, &initial}, + {"|where", NULL, &wheremask_obj}) < 0) { goto fail; } /* Prepare input for PyUfunc_CheckOverride */ - full_args.in = PyTuple_Pack(1, op); + full_args.in = PyTuple_FromArray(&op, 1); if (full_args.in == NULL) { goto fail; } @@ -3556,7 +3643,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } if (out_obj != Py_None) { - full_args.out = PyTuple_Pack(1, out_obj); + full_args.out = PyTuple_FromArray(&out_obj, 1); if (full_args.out == NULL) { goto fail; } @@ -3624,65 +3711,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, ndim = PyArray_NDIM(mp); - /* Convert the 'axis' parameter into a list of axes */ - if (axes_obj == NULL) { - /* apply defaults */ - if (ndim == 0) { - naxes = 0; - } - else { - naxes = 1; - axes[0] = 0; - } - } - else if (axes_obj == Py_None) { - /* Convert 'None' into all the axes */ - naxes = ndim; - for (i = 0; i < naxes; ++i) { - axes[i] = i; - } - } - else if 
(PyTuple_Check(axes_obj)) { - naxes = PyTuple_Size(axes_obj); - if (naxes < 0 || naxes > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "too many values for 'axis'"); - goto fail; - } - for (i = 0; i < naxes; ++i) { - PyObject *tmp = PyTuple_GET_ITEM(axes_obj, i); - int axis = PyArray_PyIntAsInt(tmp); - if (error_converting(axis)) { - goto fail; - } - if (check_and_adjust_axis(&axis, ndim) < 0) { - goto fail; - } - axes[i] = (int)axis; - } - } - else { - /* Try to interpret axis as an integer */ - int axis = PyArray_PyIntAsInt(axes_obj); - /* TODO: PyNumber_Index would be good to use here */ - if (error_converting(axis)) { - goto fail; - } - /* - * As a special case for backwards compatibility in 'sum', - * 'prod', et al, also allow a reduction for scalars even - * though this is technically incorrect. - */ - if (ndim == 0 && (axis == 0 || axis == -1)) { - naxes = 0; - } - else if (check_and_adjust_axis(&axis, ndim) < 0) { - goto fail; - } - else { - axes[0] = (int)axis; - naxes = 1; - } + /* Extract the axis argument */ + int naxes = _parse_axis(axes_obj, ndim, axes); + if (naxes < 0) { + goto fail; } switch(operation) { @@ -4337,18 +4369,9 @@ try_trivial_scalar_call( // Try getting info from the (private) cache. Fall back if not found, // so that the the dtype gets registered and things will work next time. PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; -#ifdef Py_GIL_DISABLED - // Other threads may be in the process of filling the dispatch cache, - // so we need to acquire the free-threading-specific dispatch cache mutex - // before reading the cache - PyObject *info = PyArrayIdentityHash_GetItemWithLock( // borrowed reference. - (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes); -#else PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference. 
(PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); -#endif if (info == NULL) { goto bail; } @@ -4524,7 +4547,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, "use the `out` keyword argument instead. If you hoped to work with " "more than 2 inputs, combine them into a single array and get the extrema " "for the relevant axis.") < 0) { - return NULL; + goto fail; } } @@ -4545,7 +4568,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, PyObject *keepdims_obj = NULL, *casting_obj = NULL, *order_obj = NULL; PyObject *subok_obj = NULL, *signature_obj = NULL, *sig_obj = NULL; PyObject *dtype_obj = NULL; - /* Typically, NumPy defaults to returnin scalars for 0-D results */ + /* Typically, NumPy defaults to returning scalars for 0-D results */ npy_bool return_scalar = NPY_TRUE; /* Skip parsing if there are no keyword arguments, nothing left to do */ @@ -4554,15 +4577,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames, - "$out", NULL, &out_obj, - "$where", NULL, &where_obj, - "$casting", NULL, &casting_obj, - "$order", NULL, &order_obj, - "$subok", NULL, &subok_obj, - "$dtype", NULL, &dtype_obj, - "$signature", NULL, &signature_obj, - "$sig", NULL, &sig_obj, - NULL, NULL, NULL) < 0) { + {"$out", NULL, &out_obj}, + {"$where", NULL, &where_obj}, + {"$casting", NULL, &casting_obj}, + {"$order", NULL, &order_obj}, + {"$subok", NULL, &subok_obj}, + {"$dtype", NULL, &dtype_obj}, + {"$signature", NULL, &signature_obj}, + {"$sig", NULL, &sig_obj}) < 0) { goto fail; } } @@ -4570,17 +4592,16 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames, - "$out", NULL, &out_obj, - "$axes", NULL, &axes_obj, - "$axis", NULL, &axis_obj, - "$keepdims", NULL, &keepdims_obj, - "$casting", NULL, &casting_obj, - "$order", NULL, &order_obj, - "$subok", NULL, &subok_obj, - "$dtype", NULL, &dtype_obj, - "$signature", 
NULL, &signature_obj, - "$sig", NULL, &sig_obj, - NULL, NULL, NULL) < 0) { + {"$out", NULL, &out_obj}, + {"$axes", NULL, &axes_obj}, + {"$axis", NULL, &axis_obj}, + {"$keepdims", NULL, &keepdims_obj}, + {"$casting", NULL, &casting_obj}, + {"$order", NULL, &order_obj}, + {"$subok", NULL, &subok_obj}, + {"$dtype", NULL, &dtype_obj}, + {"$signature", NULL, &signature_obj}, + {"$sig", NULL, &sig_obj}) < 0) { goto fail; } if (NPY_UNLIKELY((axes_obj != NULL) && (axis_obj != NULL))) { @@ -4638,9 +4659,10 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, } /* Warn if "where" is used without "out", issue 29561 */ - if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) { + if ((where_obj != NULL && where_obj != Py_True) + && (full_args.out == NULL) && (out_obj == NULL)) { if (PyErr_WarnEx(PyExc_UserWarning, - "'where' used without 'out', expect unitialized memory in output. " + "'where' used without 'out', expect uninitialized memory in output. " "If this is intentional, use out=None.", 1) < 0) { goto fail; } @@ -4891,7 +4913,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi ufunc->userloops = NULL; ufunc->ptr = NULL; ufunc->vectorcall = &ufunc_generic_vectorcall; - ufunc->reserved1 = 0; + ufunc->_ufunc_flags = 0; ufunc->iter_flags = 0; /* Type resolution and inner loop selection functions */ @@ -4915,7 +4937,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi */ ufunc->_dispatch_cache = NULL; } - ufunc->_loops = PyList_New(0); + ufunc->_loops = PyDict_New(); if (ufunc->_loops == NULL) { Py_DECREF(ufunc); return NULL; @@ -5243,21 +5265,18 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, * A new-style loop should not be replaced by an old-style one. 
*/ int add_new_loop = 1; - for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) { - PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK - PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0); - - int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ); - if (cmp < 0) { - goto fail; - } - if (!cmp) { - continue; - } - PyObject *registered = PyTuple_GET_ITEM(item, 1); - if (!PyObject_TypeCheck(registered, &PyArrayMethod_Type) || ( - (PyArrayMethodObject *)registered)->get_strided_loop != - &get_wrapped_legacy_ufunc_loop) { + PyObject *existing_item; + if (PyDict_GetItemRef(ufunc->_loops, signature_tuple, &existing_item) < 0) { + goto fail; + } + if (existing_item != NULL) { + PyObject *registered = PyTuple_GET_ITEM(existing_item, 1); + int not_compatible = ( + !PyObject_TypeCheck(registered, &PyArrayMethod_Type) || + ((PyArrayMethodObject *)registered)->get_strided_loop != + &get_wrapped_legacy_ufunc_loop); + Py_DECREF(existing_item); + if (not_compatible) { PyErr_Format(PyExc_TypeError, "A non-compatible loop was already registered for " "ufunc %s and DTypes %S.", @@ -5266,7 +5285,6 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, } /* The loop was already added */ add_new_loop = 0; - break; } if (add_new_loop) { PyObject *info = add_and_return_legacy_wrapping_ufunc_loop( @@ -6219,11 +6237,10 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, npy_bool reduction = NPY_FALSE; if (npy_parse_arguments("resolve_dtypes", args, len_args, kwnames, - "", NULL, &descrs_tuple, - "$signature", NULL, &signature_obj, - "$casting", &PyArray_CastingConverter, &casting, - "$reduction", &PyArray_BoolConverter, &reduction, - NULL, NULL, NULL) < 0) { + {"", NULL, &descrs_tuple}, + {"$signature", NULL, &signature_obj}, + {"$casting", &PyArray_CastingConverter, &casting}, + {"$reduction", &PyArray_BoolConverter, &reduction}) < 0) { return NULL; } @@ -6430,7 +6447,8 @@ 
py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, ((PyArray_Descr **)context->descriptors)[i] = operation_descrs[i]; } - result = PyTuple_Pack(2, result_dtype_tuple, capsule); + PyObject *result_items[] = {result_dtype_tuple, capsule}; + result = PyTuple_FromArray(result_items, 2); /* cleanup and return */ Py_DECREF(capsule); @@ -6474,9 +6492,8 @@ py_get_strided_loop(PyUFuncObject *ufunc, npy_intp fixed_strides[NPY_MAXARGS]; if (npy_parse_arguments("_get_strided_loop", args, len_args, kwnames, - "", NULL, &call_info_obj, - "$fixed_strides", NULL, &fixed_strides_obj, - NULL, NULL, NULL) < 0) { + {"", NULL, &call_info_obj}, + {"$fixed_strides", NULL, &fixed_strides_obj}) < 0) { return NULL; } @@ -6687,6 +6704,10 @@ ufunc_get_types(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) return NULL; } t = PyArray_malloc(no+ni+2); + if (t == NULL) { + Py_DECREF(list); + return PyErr_NoMemory(); + } n = 0; for (k = 0; k < nt; k++) { for (j = 0; jtype_num == NPY_TIMEDELTA) { + out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } + out_dtypes[1] = PyArray_DescrFromType(NPY_DOUBLE); + return 0; + } + else { + return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting, + operands, type_tup, out_dtypes); + } +} + /* * The ones_like function shouldn't really be a ufunc, but while it @@ -584,6 +612,9 @@ PyUFunc_SimpleUniformOperationTypeResolver( descr = PyArray_DESCR(operands[0]); } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(descr); + if (out_dtypes[0] == NULL) { + return -1; + } } /* All types are the same - copy the first one to the rest */ @@ -650,6 +681,9 @@ PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc, } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL); return 0; @@ -669,6 +703,9 @@ PyUFunc_IsFiniteTypeResolver(PyUFuncObject *ufunc, } out_dtypes[0] = 
NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL); return 0; @@ -1121,7 +1158,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc, return -1; } - // This is wrong agaian cause of elsize, but only the DType matters + // This is wrong again because of elsize, but only the DType matters // here (String or Unicode). out_dtypes[2] = out_dtypes[1]; Py_INCREF(out_dtypes[1]); diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 9e812e97d6fe..531e9267afa2 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -19,6 +19,13 @@ PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc, PyObject *type_tup, PyArray_Descr **out_dtypes); +NPY_NO_EXPORT int +PyUFunc_SignTypeResolver(PyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + NPY_NO_EXPORT int PyUFunc_OnesLikeTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index 3efb02bd4a49..439a2ac308d9 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -31,6 +31,7 @@ #include "string_ufuncs.h" #include "stringdtype_ufuncs.h" #include "special_integer_comparisons.h" +#include "real_imag_ufuncs.h" #include "extobj.h" /* for _extobject_contextvar exposure */ #include "ufunc_type_resolution.h" @@ -163,53 +164,6 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { return (PyObject *)self; } -/* docstring in numpy.add_newdocs.py */ -PyObject * -add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - - /* 2024-11-12, NumPy 2.2 */ - if (DEPRECATE("_add_newdoc_ufunc is deprecated. 
" - "Use `ufunc.__doc__ = newdoc` instead.") < 0) { - return NULL; - } - - PyUFuncObject *ufunc; - PyObject *str; - if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc, - &PyUnicode_Type, &str)) { - return NULL; - } - if (ufunc->doc != NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot change docstring of ufunc with non-NULL docstring"); - return NULL; - } - - PyObject *tmp = PyUnicode_AsUTF8String(str); - if (tmp == NULL) { - return NULL; - } - char *docstr = PyBytes_AS_STRING(tmp); - - /* - * This introduces a memory leak, as the memory allocated for the doc - * will not be freed even if the ufunc itself is deleted. In practice - * this should not be a problem since the user would have to - * repeatedly create, document, and throw away ufuncs. - */ - char *newdocstr = malloc(strlen(docstr) + 1); - if (!newdocstr) { - Py_DECREF(tmp); - return PyErr_NoMemory(); - } - strcpy(newdocstr, docstr); - ufunc->doc = newdocstr; - - Py_DECREF(tmp); - Py_RETURN_NONE; -} - /* ***************************************************************************** @@ -321,6 +275,10 @@ int initumath(PyObject *m) return -1; } + if (init_real_imag_ufuncs(m) < 0) { + return -1; + } + if (init_stringdtype_ufuncs(m) < 0) { return -1; } diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 924bac9524e9..33561153b2a9 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -206,7 +206,7 @@ wrapping_method_get_identity_function( int res = context->method->wrapped_meth->get_reduction_initial( &orig_context, reduction_is_empty, item); for (int i = 0; i < nin + nout; i++) { - Py_DECREF(orig_descrs); + Py_DECREF(orig_descrs[i]); } return res; } @@ -235,6 +235,7 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, PyObject *wrapped_dt_tuple = NULL; PyObject *new_dt_tuple = NULL; PyArrayMethodObject *meth = NULL; + PyObject *existing_info = NULL; if 
(!PyObject_TypeCheck(ufunc_obj, &PyUFunc_Type)) { PyErr_SetString(PyExc_TypeError, @@ -249,28 +250,19 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, } PyArrayMethodObject *wrapped_meth = NULL; - PyObject *loops = ufunc->_loops; - Py_ssize_t length = PyList_Size(loops); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItemRef(loops, i); - PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); - Py_DECREF(item); - int cmp = PyObject_RichCompareBool(cur_DType_tuple, wrapped_dt_tuple, Py_EQ); - if (cmp < 0) { - goto finish; - } - if (cmp == 0) { - continue; - } - wrapped_meth = (PyArrayMethodObject *)PyTuple_GET_ITEM(item, 1); - if (!PyObject_TypeCheck(wrapped_meth, &PyArrayMethod_Type)) { + if (PyDict_GetItemRef(ufunc->_loops, wrapped_dt_tuple, &existing_info) < 0) { + goto finish; + } + if (existing_info != NULL) { + PyObject *existing_meth = PyTuple_GET_ITEM(existing_info, 1); + if (!PyObject_TypeCheck(existing_meth, &PyArrayMethod_Type)) { PyErr_SetString(PyExc_TypeError, "Matching loop was not an ArrayMethod."); goto finish; } - break; + wrapped_meth = (PyArrayMethodObject *)existing_meth; } - if (wrapped_meth == NULL) { + else { PyErr_Format(PyExc_TypeError, "Did not find the to-be-wrapped loop in the ufunc with given " "DTypes. Received wrapping types: %S", wrapped_dt_tuple); @@ -336,5 +328,6 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, Py_XDECREF(wrapped_dt_tuple); Py_XDECREF(new_dt_tuple); Py_XDECREF(meth); + Py_XDECREF(existing_info); return res; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index e9fa7f58e3ea..db72d6819ba1 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -224,7 +224,7 @@ def mod(a, values): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` or ``str_`` dtype values : array_like of values These values will be element-wise interpolated into the string. 
@@ -263,7 +263,7 @@ def find(a, sub, start=0, end=None): ---------- a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array_like, with ``bytes_`` or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -375,9 +375,9 @@ def rindex(a, sub, start=0, end=None): Parameters ---------- - a : array-like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``bytes_`` or ``str_`` dtype - sub : array-like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``bytes_`` or ``str_`` dtype start, end : array-like, with any integer dtype, optional @@ -1689,7 +1689,7 @@ def translate(a, table, deletechars=None): Parameters ---------- - a : array-like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``bytes_`` or ``str_`` dtype table : str of length 256 diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 270760412670..f5f3f13122dc 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,6 +1,7 @@ -from typing import TypeAlias, overload +from typing import overload import numpy as np +from numpy import add, equal, greater, greater_equal, less, less_equal, not_equal from numpy._globals import _NoValueType from numpy._typing import ( NDArray, @@ -13,6 +14,20 @@ from numpy._typing import ( _SupportsArray, ) +from .defchararray import ( + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + mod, + str_len, +) + __all__ = [ "add", "capitalize", @@ -62,60 +77,9 @@ __all__ = [ "slice", ] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray - -@overload -def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... 
-@overload -def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... -@overload -def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... -@overload -def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... -@overload -def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = NDArray[np.str_] | _StringDTypeArray @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @@ -126,27 +90,6 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... @overload def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... 
-@overload -def mod(a: U_co, value: object) -> NDArray[np.str_]: ... -@overload -def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... -@overload -def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... -@overload -def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... - -def isalpha(x: UST_co) -> NDArray[np.bool]: ... -def isalnum(a: UST_co) -> NDArray[np.bool]: ... -def isdigit(x: UST_co) -> NDArray[np.bool]: ... -def isspace(x: UST_co) -> NDArray[np.bool]: ... -def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... -def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... -def islower(a: UST_co) -> NDArray[np.bool]: ... -def istitle(a: UST_co) -> NDArray[np.bool]: ... -def isupper(a: UST_co) -> NDArray[np.bool]: ... - -def str_len(x: UST_co) -> NDArray[np.int_]: ... - @overload def find( a: U_co, diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index f0f427d2167f..9e4cdd3c10e1 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -4,6 +4,7 @@ Functions in this module give python-space wrappers for cython functions exposed in numpy/__init__.pxd, so they can be tested in test_cython.py """ +import numpy as np cimport numpy as cnp cnp.import_array() @@ -142,7 +143,7 @@ def get_dtype_flags(cnp.dtype dtype): cdef cnp.NpyIter* npyiter_from_nditer_obj(object it): - """A function to create a NpyIter struct from a nditer object. + """A function to create a NpyIter struct from an nditer object. 
This function is only meant for testing purposes and only extracts the necessary info from nditer to test the functionality of NpyIter methods @@ -248,6 +249,7 @@ def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim cnp.NpyIter_GetGetMultiIndex(cit, NULL) cdef cnp.NpyIter_IterNextFunc _iternext = \ cnp.NpyIter_GetIterNext(cit, NULL) + cnp.NpyIter_Deallocate(cit) return 1 @@ -371,3 +373,9 @@ def check_npy_uintp_type_enum(): # Regression test for gh-27890: cnp.NPY_UINTP was not defined. # Cython would fail to compile this before gh-27890 was fixed. return cnp.NPY_UINTP > 0 + + +def resize_refcheck_test(): + # see gh-30991 + a = np.array([[0, 1], [2, 3]], order='C') + a.resize((2, 1)) diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index eb57477fc2a1..ba0639ebcf1c 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -4,10 +4,10 @@ """ import os -from distutils.core import setup import Cython from Cython.Build import cythonize +from distutils.core import setup from setuptools.extension import Extension import numpy as np diff --git a/numpy/_core/tests/examples/limited_api/limited_api1.c b/numpy/_core/tests/examples/limited_api/limited_api1.c index 3dbf5698f1d4..115a3f3a6835 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api1.c +++ b/numpy/_core/tests/examples/limited_api/limited_api1.c @@ -1,5 +1,3 @@ -#define Py_LIMITED_API 0x03060000 - #include #include #include diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index 65287d8654f5..2348b0856d0f 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -1,4 +1,8 @@ -project('checks', 'c', 'cython') +project( + 'checks', + 'c', 'cython', + meson_version: '>=1.8.3', +) py = import('python').find_installation(pure: false) @@ -31,7 +35,7 @@ 
py.extension_module( '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', ], include_directories: [npy_include_path], - limited_api: '3.6', + limited_api: '3.9', ) py.extension_module( @@ -55,5 +59,5 @@ py.extension_module( '-DCYTHON_LIMITED_API=1', ], include_directories: [npy_include_path], - limited_api: '3.7', + limited_api: '3.9', ) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 216a2c75afb8..f3c10196eb13 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -475,7 +475,7 @@ def test_copyto_cast_safety(): np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") -def test_copyto_permut(): +def test_copyto_permute(): # test explicit overflow case pad = 500 l = [True] * pad + [True, True, True, True] diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 4842dbfa9486..6872e8000ce5 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -110,4 +110,4 @@ def test_dtypes_invalid_device(): def test_devices(): - assert info.devices() == ["cpu"] + assert info.devices() == ("cpu",) diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index ba28e886282f..03397b6fd9e9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -12,7 +12,7 @@ import numpy as np import numpy._core._multiarray_umath as ncu from numpy._core._rational_tests import rational -from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal +from numpy.testing import IS_64BIT, assert_array_equal def arraylikes(): @@ -121,11 +121,11 @@ def scalar_instances(times=True, extended_precision=True, user_dtype=True): if times: # Datetimes and timedelta - yield param(np.timedelta64(2), id="timedelta64[generic]") + yield param(np.timedelta64(2, "ns"), id="timedelta64[ns]") yield param(np.timedelta64(23, "s"), id="timedelta64[s]") yield param(np.timedelta64("NaT", 
"s"), id="timedelta64[s](NaT)") - yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)") + yield param(np.datetime64("NaT", "D"), id="datetime64[D](NaT)") yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]") # Strings and unstructured void: @@ -282,7 +282,6 @@ def test_scalar_coercion(self, scalar): assert_array_equal(arr, arr3) assert_array_equal(arr, arr4) - @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") @pytest.mark.parametrize("cast_to", scalar_instances()) def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): @@ -402,7 +401,7 @@ class TestTimeScalars: @pytest.mark.parametrize("scalar", [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"), param(np.timedelta64(123, "s"), id="timedelta64[s]"), - param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"), + param(np.datetime64("NaT", "D"), id="datetime64[D](NaT)"), param(np.datetime64(1, "D"), id="datetime64[D]")],) def test_coercion_basic(self, dtype, scalar): # Note the `[scalar]` is there because np.array(scalar) uses stricter @@ -422,12 +421,11 @@ def test_coercion_basic(self, dtype, scalar): assert_array_equal(ass, cast) @pytest.mark.parametrize("dtype", [np.int64, np.float32]) - @pytest.mark.parametrize("scalar", - [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"), - param(np.timedelta64(12, "generic"), id="timedelta64[generic]")]) - def test_coercion_timedelta_convert_to_number(self, dtype, scalar): + @pytest.mark.parametrize("value, unit", [param(123, "ns", id="timedelta64[ns]")]) + def test_coercion_timedelta_convert_to_number(self, dtype, value, unit): # Only "ns" and "generic" timedeltas can be converted to numbers # so these are slightly special. 
+ scalar = np.timedelta64(value, unit) arr = np.array(scalar, dtype=dtype) cast = np.array(scalar).astype(dtype) ass = np.ones((), dtype=dtype) @@ -436,6 +434,23 @@ def test_coercion_timedelta_convert_to_number(self, dtype, scalar): assert_array_equal(arr, cast) assert_array_equal(cast, cast) + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("value, unit", + [param(12, "generic", id="timedelta64[generic]")]) + def test_coercion_generic_timedelta_convert_to_number(self, dtype, value, unit): + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + scalar = np.timedelta64(value, unit) + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + ass = np.ones((), dtype=dtype) + ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype) + + assert_array_equal(arr, cast) + assert_array_equal(cast, cast) + @pytest.mark.parametrize("dtype", ["S6", "U6"]) @pytest.mark.parametrize(["val", "unit"], [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) @@ -841,7 +856,7 @@ class TestSpecialAttributeLookupFailure: class WeirdArrayLike: @property - def __array__(self, dtype=None, copy=None): # noqa: PLR0206 + def __array__(self, dtype=None, copy=None): raise RuntimeError("oops!") class WeirdArrayInterface: @@ -911,7 +926,7 @@ def test_empty_string(): @pytest.mark.parametrize("res_dt,hug_val", [("float16", "1e30"), ("float32", "1e200")]) def test_string_to_float_coercion_errors(dtype, res_dt, hug_val): - # This test primarly tests setitem + # This test primarily tests setitem val = np.array(["3M"], dtype=dtype)[0] # use the scalar with pytest.raises(ValueError): diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index 5b3d51585718..4400fccf32e8 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -11,6 +11,9 @@ import numpy as np from numpy._core._multiarray_umath import 
_get_castingimpl as get_castingimpl +# accessing `numpy.char.charray` will issue a deprecation warning +from numpy._core.defchararray import chararray + class TestResolveDescriptors: # Test mainly error paths of the resolve_descriptors function, @@ -62,9 +65,7 @@ def test_invalid_arguments(self, args, error): @pytest.mark.parametrize( - "cls", [ - np.ndarray, np.recarray, np.char.chararray, np.matrix, np.memmap - ] + "cls", [np.ndarray, np.recarray, chararray, np.matrix, np.memmap] ) class TestClassGetItem: def test_class_getitem(self, cls: type[np.ndarray]) -> None: diff --git a/numpy/_core/tests/test_arrayobject.py b/numpy/_core/tests/test_arrayobject.py index ffa1ba001776..0cfff63158dc 100644 --- a/numpy/_core/tests/test_arrayobject.py +++ b/numpy/_core/tests/test_arrayobject.py @@ -1,7 +1,10 @@ +import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal +from numpy._core._rational_tests import rational +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_matrix_transpose_raises_error_for_1d(): @@ -73,3 +76,122 @@ def test_array_wrap(subclass_self, subclass_arr): # Non 0-D array can't be converted to scalar, so we ignore that arr1d = np.array([3], dtype=np.int8).view(subclass_arr) assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_cleanup_with_refs_non_contig(): + # Regression test, leaked the dtype (but also good for rest) + dtype = np.dtype("O,i") + obj = object() + expected_ref_dtype = sys.getrefcount(dtype) + expected_ref_obj = sys.getrefcount(obj) + proto = np.full((3, 4, 5, 6, 7), np.array((obj, 2), dtype=dtype)) + # Give array a non-trivial order to exercise more cleanup paths. 
+ arr = proto.transpose((2, 0, 3, 1, 4)).copy("K") + del proto, arr + + actual_ref_dtype = sys.getrefcount(dtype) + actual_ref_obj = sys.getrefcount(obj) + assert actual_ref_dtype == expected_ref_dtype + assert actual_ref_obj == actual_ref_dtype + + +@pytest.mark.parametrize("dtype", + list("?bhilqnpBHILQNPefdgSUV") + ["M8[ns]", "m8[ns]", rational]) +def test_real_imag_attributes_non_complex(dtype): + dtype = np.dtype(dtype) + + a = np.array([[1, 2, 3], [4, 5, 6]]).astype(dtype) + assert a.real is a + # One could imagine broadcasting, but doesn't right now: + imag = a.imag + assert imag.strides == a.strides + assert imag.dtype == a.dtype + # This part is rather unclear: + assert (imag == np.zeros((), dtype=a.dtype)).all() + assert imag.flags.writeable is False + + class myarr(np.ndarray): + def __array_finalize__(self, obj): + self.finalized_with = obj + + ma = a.view(myarr) + assert ma.real is ma + assert type(ma.imag) is myarr + assert ma.imag.finalized_with is ma + + +@pytest.mark.parametrize("dtype,real_dt", + [(">c8", ">f4"), ("c16", "f8"), ("clongdouble", "longdouble")]) +@pytest.mark.parametrize("variation", ["transpose", "set_writeable"]) +def test_real_imag_attributes_complex(dtype, real_dt, variation): + a = np.array([[1, 2j, 3], [4, 5j, 6]]).astype(dtype) + real = np.array([[1, 0, 3], [4, 0, 6]], dtype=real_dt) + imag = np.array([[0, 2, 0], [0, 5, 0]], dtype=real_dt) + + if variation == "transpose": + a = a.T + real = real.T + imag = imag.T + elif variation == "set_writeable": + a.flags.writeable = False + + assert_array_equal(a.real, real) + assert_array_equal(a.imag, imag) + assert a.real.dtype == real_dt + assert a.imag.dtype == real_dt + assert np.may_share_memory(a.real, a) + assert np.may_share_memory(a.imag, a) + assert a.real.flags.writeable == a.flags.writeable + assert a.imag.flags.writeable == a.flags.writeable + + class myarr(np.ndarray): + def __array_finalize__(self, obj): + self.finalized_with = obj + + ma = a.view(myarr) + assert 
ma.real.finalized_with is ma + assert ma.imag.finalized_with is ma + + +def test_real_imag_attributes_object(): + a = np.array([[1, 0.5 + 2j, 3, int], [4, 5j, "string", {}]], dtype=object) + + # NOTE(seberg): doing something for non-numbers is guesswork... + real = np.array([[1, 0.5, 3, int.real], [4, 0, "string", {}]], dtype=object) + imag = np.array([[0, 2, 0, int.imag], [0, 5, 0, 0]], dtype=object) + + assert_array_equal(a.real, real) + assert_array_equal(a.imag, imag) + assert a.real.dtype == object + assert a.imag.dtype == object + assert not np.may_share_memory(a.real, a) + assert not np.may_share_memory(a.imag, a) + assert not a.real.flags.writeable + assert not a.imag.flags.writeable + + # Object returns new arrays via ufuncs, so call wrap + class myarr(np.ndarray): + def __array_wrap__(self, *args, **kwargs): + ret = super().__array_wrap__(*args, **kwargs) + ret.wrap_called = True + return ret + + ma = a.view(myarr) + assert ma.real.wrap_called + assert ma.imag.wrap_called + + +@pytest.mark.parametrize("ufunc,attr", [ + (np._core.umath.real, "real"), (np._core.umath.imag, "imag")]) +def test_real_imag_ufunc_minimal(ufunc, attr): + with pytest.raises(TypeError): + ufunc(np.array([1, 2, 3])) # non-complex or object raises + + arr = np.array([1 + 2j, 3 + 4j]) + res = ufunc(arr) + assert_array_equal(res, getattr(arr, attr), strict=True) + + arr = np.array([1 + 2j, 3 + 4j], dtype=object) + res = ufunc(arr) + assert_array_equal(res, getattr(arr, attr), strict=True) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 06d1306dd408..297462544c6d 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -8,10 +8,12 @@ import numpy as np from numpy._core.arrayprint import _typelessdata +from numpy._utils import _pep440 from numpy.testing import ( HAS_REFCOUNT, IS_WASM, assert_, + assert_array_equal, assert_equal, assert_raises, assert_raises_regex, @@ -276,12 +278,13 @@ def 
test_structure_format_mixed(self): try: # for issue #5692 A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) - A[5:].fill(np.datetime64('NaT')) + A[5:].fill(np.datetime64('NaT', 'D')) + date_string = '1970-01-01T00:00:00' assert_equal( np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + textwrap.dedent(f"""\ + [('{date_string}',) ('{date_string}',) ('{date_string}',) + ('{date_string}',) ('{date_string}',) ('NaT',) ('NaT',) ('NaT',) ('NaT',) ('NaT',)]""") ) finally: @@ -300,7 +303,7 @@ def test_structure_format_mixed(self): # and again, with timedeltas A = np.full(10, 123456, dtype=[("A", "m8[s]")]) - A[5:].fill(np.datetime64('NaT')) + A[5:].fill(np.datetime64('NaT', 'D')) assert_equal( np.array2string(A), textwrap.dedent("""\ @@ -1243,7 +1246,7 @@ def test_scalar_repr_numbers(dtype, value): (np.str_('a'), "'a'", "np.str_('a')"), (np.datetime64("2012"), "numpy.datetime64('2012')", "np.datetime64('2012')"), - (np.timedelta64(1), "numpy.timedelta64(1)", "np.timedelta64(1)"), + (np.timedelta64(1, 's'), "numpy.timedelta64(1,'s')", "np.timedelta64(1,'s')"), (np.void((True, 2), dtype="?,= 7: + self.features_flags.add("ARCH_2_06") + if power_gen >= 8: + self.features_flags.add("ARCH_2_07") + if power_gen >= 9: + self.features_flags.add("ARCH_3_00") + if power_gen >= 10: + self.features_flags.add("ARCH_3_1B") + + def _get_platform(self): + """Get the AT_PLATFORM value from AUXV""" + try: + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + for line in auxv.split(b'\n'): + if line.startswith(b'AT_PLATFORM'): + parts = line.split(b':', 1) + if len(parts) == 2: + return parts[1].strip().decode().lower() + except Exception: + pass + return None is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) @@ -391,7 +431,7 @@ class Test_ZARCH_Features(AbstractTest): features = ["VX", "VXE", "VXE2"] def load_flags(self): - 
self.load_flags_auxv() + self.load_flags_cpuinfo("features") is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) @@ -420,14 +460,18 @@ def load_flags(self): else: self.features_map = { # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) - # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # doesn't provide information about ASIMD + # so we assume that ASIMD is supported # if the kernel reports any one of the following ARM8 features. "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") } is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) -@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch") +@pytest.mark.skipif( + not is_linux or not is_loongarch, + reason="Only for Linux and LoongArch", +) class Test_LOONGARCH_Features(AbstractTest): features = ["LSX"] diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 2acb4adf4c7c..13be3dfb7325 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -373,6 +373,12 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) + def test_conjugate(self): + # Also user dtype can just return self if conjugate should be no-op. 
+ arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + assert arr.conjugate() is arr + + @pytest.mark.thread_unsafe( reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" ) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index c405a59e535e..0c81543008dd 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -86,7 +86,12 @@ def install_temp(tmpdir_factory): def test_is_timedelta64_object(install_temp): import checks - assert checks.is_td64(np.timedelta64(1234)) + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) assert checks.is_td64(np.timedelta64("NaT", "ns")) @@ -105,7 +110,12 @@ def test_is_datetime64_object(install_temp): assert not checks.is_dt64(1) assert not checks.is_dt64(None) assert not checks.is_dt64("foo") - assert not checks.is_dt64(np.timedelta64(1234)) + + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + assert not checks.is_dt64(np.timedelta64(1234)) def test_get_datetime64_value(install_temp): @@ -350,3 +360,18 @@ def test_npystring_allocators_other_dtype(install_temp): def test_npy_uintp_type_enum(install_temp): import checks assert checks.check_npy_uintp_type_enum() + + +@pytest.mark.skipif( + sys.version_info < (3, 14), + reason="Tests behavior that happens on Python 3.14 and newer" +) +@pytest.mark.skipif( + sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64' +) +def test_resize_refcheck(install_temp): + import checks + msg = "It is possible that this is a false positive." 
+ with pytest.raises(ValueError, match=msg): + checks.resize_refcheck_test() diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c7b11149ed43..4825e6926179 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,6 +1,7 @@ import datetime import pickle import warnings +from typing import Final from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -34,6 +35,9 @@ def _assert_equal_hash(v1, v2): class TestDateTime: + generic_unit_deprecation_message: Final[str] = ( + "The 'generic' unit for NumPy timedelta is deprecated" + ) def test_string(self): msg = "no explicit representation of timezones available for " \ @@ -202,10 +206,15 @@ def test_prohibit_negative_datetime(self, unit): def test_compare_generic_nat(self): # regression tests for gh-6452 - assert_(np.datetime64('NaT') != - np.datetime64('2000') + np.timedelta64('NaT')) - assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us')) - assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT')) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + assert_( + np.datetime64("NaT") != np.datetime64("2000") + np.timedelta64("NaT") + ) + assert_(np.datetime64("NaT") != np.datetime64("NaT", "us")) + assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT')) @pytest.mark.parametrize("size", [ 3, 21, 217, 1000]) @@ -213,7 +222,7 @@ def test_datetime_nat_argsort_stability(self, size): # NaT < NaT should be False internally for # sort stability expected = np.arange(size) - arr = np.tile(np.datetime64('NaT'), size) + arr = np.tile(np.datetime64('NaT', 'D'), size) assert_equal(np.argsort(arr, kind='mergesort'), expected) @pytest.mark.parametrize("size", [ @@ -222,7 +231,7 @@ def test_timedelta_nat_argsort_stability(self, size): # NaT < NaT should be False internally for # sort stability expected = np.arange(size) - arr = np.tile(np.timedelta64('NaT'), size) + arr = 
np.tile(np.timedelta64('NaT', 's'), size) assert_equal(np.argsort(arr, kind='mergesort'), expected) @pytest.mark.parametrize("arr, expected", [ @@ -258,25 +267,35 @@ def test_datetime_scalar_construction(self): assert_equal(np.datetime64('1950-03-12T13', 's'), np.datetime64('1950-03-12T13', 'm')) - # Default construction means NaT - assert_equal(np.datetime64(), np.datetime64('NaT')) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + # Default construction means NaT + assert_equal(np.datetime64(), np.datetime64('NaT')) + + # Some basic strings and repr + assert_equal(str(np.datetime64('NaT')), 'NaT') + assert_equal(repr(np.datetime64('NaT')), + "np.datetime64('NaT','generic')") + + # None gets constructed as NaT + assert_equal(np.datetime64(None), np.datetime64('NaT')) - # Some basic strings and repr - assert_equal(str(np.datetime64('NaT')), 'NaT') - assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") - assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + assert_equal(repr(np.datetime64('NaT', 'D').astype(np.dtype("datetime64[ns]"))), "np.datetime64('NaT','ns')") - # None gets constructed as NaT - assert_equal(np.datetime64(None), np.datetime64('NaT')) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + # Default construction of NaT is in generic units + assert_equal(np.datetime64().dtype, np.dtype('M8')) - # Default construction of NaT is in generic units - assert_equal(np.datetime64().dtype, np.dtype('M8')) - assert_equal(np.datetime64('NaT').dtype, np.dtype('M8')) + assert_equal(np.datetime64('NaT').dtype, np.dtype('M8')) # Construction from integers requires a specified unit assert_raises(ValueError, np.datetime64, 17) @@ -389,8 +408,11 @@ def test_timedelta_np_int_construction(self, unit): 
assert_equal(np.timedelta64(np.int64(123), unit), np.timedelta64(123, unit)) else: - assert_equal(np.timedelta64(np.int64(123)), - np.timedelta64(123)) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + assert_equal(np.timedelta64(np.int64(123)), np.timedelta64(123)) def test_timedelta_scalar_construction(self): # Construct with different units @@ -399,24 +421,36 @@ def test_timedelta_scalar_construction(self): assert_equal(np.timedelta64(120, 's'), np.timedelta64(2, 'm')) - # Default construction means 0 - assert_equal(np.timedelta64(), np.timedelta64(0)) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + # Default construction means 0 + assert_equal(np.timedelta64(), np.timedelta64(0)) # None gets constructed as NaT - assert_equal(np.timedelta64(None), np.timedelta64('NaT')) + assert_equal(np.timedelta64(None, 's'), np.timedelta64('NaT', 's')) # Some basic strings and repr - assert_equal(str(np.timedelta64('NaT')), 'NaT') - assert_equal(repr(np.timedelta64('NaT')), - "np.timedelta64('NaT')") + assert_equal(str(np.timedelta64('NaT', 'ns')), 'NaT') + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + assert_equal(repr(np.timedelta64("NaT")), "np.timedelta64('NaT')") assert_equal(str(np.timedelta64(3, 's')), '3 seconds') assert_equal(repr(np.timedelta64(-3, 's')), "np.timedelta64(-3,'s')") - assert_equal(repr(np.timedelta64(12)), - "np.timedelta64(12)") - # Construction from an integer produces generic units - assert_equal(np.timedelta64(12).dtype, np.dtype('m8')) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + assert_equal(repr(np.timedelta64(12)), + "np.timedelta64(12)") + + # Construction from an integer produces generic units + assert_equal(np.timedelta64(12).dtype, np.dtype('m8')) # When constructing from a scalar or zero-dimensional array, # it either keeps the units or you can 
override them. @@ -499,7 +533,11 @@ def test_timedelta_0_dim_object_array_conversion(self): def test_timedelta_nat_format(self): # gh-17552 - assert_equal('NaT', f'{np.timedelta64("nat")}') + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + assert_equal('NaT', f'{np.timedelta64("nat")}') def test_timedelta_scalar_construction_units(self): # String construction detecting units @@ -615,7 +653,7 @@ def test_datetime_nat_casting(self): clnan = nan.astype('G') hnan = nan.astype(np.half) - nat = np.array([np.datetime64('NaT')] * 8 + [np.datetime64(0, 'D')]) + nat = np.array([np.datetime64('NaT', 's')] * 8 + [np.datetime64(0, 'D')]) assert_equal(nan.astype('M8[ns]'), nat) assert_equal(fnan.astype('M8[ns]'), nat) assert_equal(lnan.astype('M8[ns]'), nat) @@ -624,7 +662,7 @@ def test_datetime_nat_casting(self): assert_equal(clnan.astype('M8[ns]'), nat) assert_equal(hnan.astype('M8[ns]'), nat) - nat = np.array([np.timedelta64('NaT')] * 8 + [np.timedelta64(0)]) + nat = np.array([np.timedelta64('NaT', 'D')] * 8 + [np.timedelta64(0, 's')]) assert_equal(nan.astype('timedelta64[ns]'), nat) assert_equal(fnan.astype('timedelta64[ns]'), nat) assert_equal(lnan.astype('timedelta64[ns]'), nat) @@ -828,28 +866,36 @@ def test_datetime_array_str(self): assert_equal(str(a), "['2010' 'NaT' '2030']") def test_timedelta_array_str(self): - a = np.array([-1, 0, 100], dtype='m') + a = np.array([-1, 0, 100], dtype='m8[s]') assert_equal(str(a), "[ -1 0 100]") - a = np.array(['NaT', 'NaT'], dtype='m') + a = np.array(['NaT', 'NaT'], dtype='m8[ns]') assert_equal(str(a), "['NaT' 'NaT']") # Check right-alignment with NaTs - a = np.array([-1, 'NaT', 0], dtype='m') + a = np.array([-1, 'NaT', 0], dtype='m8[s]') assert_equal(str(a), "[ -1 'NaT' 0]") - a = np.array([-1, 'NaT', 1234567], dtype='m') + a = np.array([-1, 'NaT', 1234567], dtype='m8[ns]') assert_equal(str(a), "[ -1 'NaT' 1234567]") # Test with other byteorder: - a = np.array([-1, 'NaT', 1234567], 
dtype='>m') + a = np.array([-1, 'NaT', 1234567], dtype='>m8[s]') assert_equal(str(a), "[ -1 'NaT' 1234567]") - a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) def test_gh_29555(self): # check that dtype metadata round-trips when none @@ -957,6 +1001,98 @@ def cast2(): numpy.datetime64("2014").astype(" nanoseconds. + # INT64_MAX / 1e9 ≈ 9.2e9 seconds ≈ 292 years from epoch, + # so dates beyond ~2262 overflow when cast to ns. + + # gh-16352: upconversion to finer units overflows + arr = np.array(["2367-12-31 12:00:00"], dtype="datetime64[h]") + with pytest.raises(OverflowError, match="Overflow"): + arr.astype("datetime64[ns]") + + # gh-16352: scalar case + val = np.datetime64("3000-01-01", "s") + with pytest.raises(OverflowError, match="Overflow"): + val.astype("datetime64[ns]") + + # gh-22346: downconversion to coarser units overflows near INT64_MIN + dt = np.datetime64(np.iinfo(np.int64).min + 1, "s") + with pytest.raises(OverflowError, match="Overflow"): + dt.astype("M8[m]") + + # negative overflow (far in the past) + val_neg = np.datetime64("0001-01-01", "s") + with pytest.raises(OverflowError, match="Overflow"): + val_neg.astype("datetime64[ns]") + + # timedelta overflow (strided cast path in dtype_transfer.c) + td = np.timedelta64(2**62, "s") + with pytest.raises(OverflowError, match="Overflow"): + td.astype("timedelta64[ns]") + + # timedelta overflow (scalar cast path in datetime.c via + # cast_timedelta_to_timedelta) + td_big = np.timedelta64(2**62, "s") + with pytest.raises(OverflowError, 
match="Overflow"): + np.array(td_big, dtype="timedelta64[ns]") + + # timedelta exact boundary: INT64_MAX // 1e9 = 9223372036 + td_ok = np.timedelta64(9223372036, "s") + result_td = td_ok.astype("timedelta64[ns]") + assert result_td == np.timedelta64(9223372036000000000, "ns") + + td_bad = np.timedelta64(9223372037, "s") + with pytest.raises(OverflowError, match="Overflow"): + td_bad.astype("timedelta64[ns]") + + # negative timedelta overflow + td_neg = np.timedelta64(-9223372037, "s") + with pytest.raises(OverflowError, match="Overflow"): + td_neg.astype("timedelta64[ns]") + + # timedelta NaT passthrough + td_nat = np.timedelta64("NaT", "s") + result_td_nat = td_nat.astype("timedelta64[ns]") + assert np.isnat(result_td_nat) + + # valid conversions near the boundary should still work + val_ok = np.datetime64("2020-01-01", "s") + result = val_ok.astype("datetime64[ns]") + assert result == np.datetime64("2020-01-01", "ns") + + arr_ok = np.array(["2000-01-01", "2020-06-15"], dtype="datetime64[s]") + result_arr = arr_ok.astype("datetime64[ns]") + expected = np.array(["2000-01-01", "2020-06-15"], dtype="datetime64[ns]") + assert_equal(result_arr, expected) + + # NaT should pass through without raising + arr_nat = np.array(["NaT", "2020-01-01"], dtype="datetime64[s]") + result_nat = arr_nat.astype("datetime64[ns]") + assert np.isnat(result_nat[0]) + assert result_nat[1] == np.datetime64("2020-01-01", "ns") + + # Exact boundary: INT64_MAX // 1e9 = 9223372036 seconds is OK, + # 9223372037 seconds overflows when cast to ns. + ok_boundary = np.datetime64(9223372036, "s") + result_boundary = ok_boundary.astype("datetime64[ns]") + assert result_boundary == np.datetime64(9223372036, "s") + + bad_boundary = np.datetime64(9223372037, "s") + with pytest.raises(OverflowError, match="Overflow"): + bad_boundary.astype("datetime64[ns]") + + # Exercise the num != 1 code path (e.g. 
"2s" metadata) + arr_2s = np.array([3], dtype="datetime64[2s]") + result_2s = arr_2s.astype("datetime64[s]") + assert result_2s[0] == np.datetime64(6, "s") + + # Overflow with num != 1 + arr_2s_big = np.array([np.iinfo(np.int64).max // 2], dtype="datetime64[2s]") + with pytest.raises(OverflowError, match="Overflow"): + arr_2s_big.astype("datetime64[ns]") + def test_pyobject_roundtrip(self): # All datetime types should be able to roundtrip through object a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1080,21 +1216,18 @@ def test_datetime_like(self): assert_equal(np.zeros_like(b).dtype, b.dtype) assert_equal(np.empty_like(b).dtype, b.dtype) - def test_datetime_unary(self): - for tda, tdb, tdzero, tdone, tdmone in \ + def test_timedelta64_unary(self): + for tda, tdb, tdzero in \ [ # One-dimensional arrays (np.array([3], dtype='m8[D]'), np.array([-3], dtype='m8[D]'), - np.array([0], dtype='m8[D]'), - np.array([1], dtype='m8[D]'), - np.array([-1], dtype='m8[D]')), + np.array([0], dtype='m8[D]')), # NumPy scalars (np.timedelta64(3, '[D]'), np.timedelta64(-3, '[D]'), - np.timedelta64(0, '[D]'), - np.timedelta64(1, '[D]'), - np.timedelta64(-1, '[D]'))]: + np.timedelta64(0, '[D]')), + ]: # negative ufunc assert_equal(-tdb, tda) assert_equal((-tdb).dtype, tda.dtype) @@ -1112,13 +1245,24 @@ def test_datetime_unary(self): assert_equal(np.absolute(tdb).dtype, tda.dtype) # sign ufunc - assert_equal(np.sign(tda), tdone) - assert_equal(np.sign(tdb), tdmone) - assert_equal(np.sign(tdzero), tdzero) - assert_equal(np.sign(tda).dtype, tda.dtype) - - # The ufuncs always produce native-endian results - assert_ + assert_equal(np.sign(tda), np.ones_like(tda, dtype=np.float64), + strict=True) + assert_equal(np.sign(tdb), -np.ones_like(tdb, dtype=np.float64), + strict=True) + assert_equal(np.sign(tdzero), np.zeros_like(tdzero, dtype=np.float64), + strict=True) + + def test_timedelta64_sign_nat(self): + x = np.array([np.timedelta64(-123, 's'), + np.timedelta64(0, 's'), + np.timedelta64(88, 
's'), + np.timedelta64('NaT', 's')]) + s = np.sign(x) + assert_equal(s, np.array([-1.0, 0.0, 1.0, np.nan]), strict=True) + + def test_timedelta64_sign_nat_scalar(self): + nat = np.timedelta64('nat', 'm') + assert_equal(np.sign(nat), np.nan) def test_datetime_add(self): for dta, dtb, dtc, dtnat, tda, tdb, tdc in \ @@ -1142,34 +1286,39 @@ def test_datetime_add(self): # m8 + m8 assert_equal(tda + tdb, tdc) assert_equal((tda + tdb).dtype, np.dtype('m8[h]')) - # m8 + bool - assert_equal(tdb + True, tdb + 1) - assert_equal((tdb + True).dtype, np.dtype('m8[h]')) - # m8 + int - assert_equal(tdb + 3 * 24, tdc) - assert_equal((tdb + 3 * 24).dtype, np.dtype('m8[h]')) - # bool + m8 - assert_equal(False + tdb, tdb) - assert_equal((False + tdb).dtype, np.dtype('m8[h]')) - # int + m8 - assert_equal(3 * 24 + tdb, tdc) - assert_equal((3 * 24 + tdb).dtype, np.dtype('m8[h]')) - # M8 + bool - assert_equal(dta + True, dta + 1) - assert_equal(dtnat + True, dtnat) - assert_equal((dta + True).dtype, np.dtype('M8[D]')) - # M8 + int - assert_equal(dta + 3, dtb) - assert_equal(dtnat + 3, dtnat) - assert_equal((dta + 3).dtype, np.dtype('M8[D]')) - # bool + M8 - assert_equal(False + dta, dta) - assert_equal(False + dtnat, dtnat) - assert_equal((False + dta).dtype, np.dtype('M8[D]')) - # int + M8 - assert_equal(3 + dta, dtb) - assert_equal(3 + dtnat, dtnat) - assert_equal((3 + dta).dtype, np.dtype('M8[D]')) + + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + # m8 + bool + assert_equal(tdb + True, tdb + 1) + assert_equal((tdb + True).dtype, np.dtype('m8[h]')) + # m8 + int + assert_equal(tdb + 3 * 24, tdc) + assert_equal((tdb + 3 * 24).dtype, np.dtype('m8[h]')) + # bool + m8 + assert_equal(False + tdb, tdb) + assert_equal((False + tdb).dtype, np.dtype('m8[h]')) + # int + m8 + assert_equal(3 * 24 + tdb, tdc) + assert_equal((3 * 24 + tdb).dtype, np.dtype('m8[h]')) + # M8 + bool + assert_equal(dta + True, dta + 1) + assert_equal(dtnat + True, dtnat) + 
assert_equal((dta + True).dtype, np.dtype('M8[D]')) + # M8 + int + assert_equal(dta + 3, dtb) + assert_equal(dtnat + 3, dtnat) + assert_equal((dta + 3).dtype, np.dtype('M8[D]')) + # bool + M8 + assert_equal(False + dta, dta) + assert_equal(False + dtnat, dtnat) + assert_equal((False + dta).dtype, np.dtype('M8[D]')) + # int + M8 + assert_equal(3 + dta, dtb) + assert_equal(3 + dtnat, dtnat) + assert_equal((3 + dta).dtype, np.dtype('M8[D]')) # M8 + m8 assert_equal(dta + tda, dtb) assert_equal(dtnat + tda, dtnat) @@ -1217,27 +1366,31 @@ def test_datetime_subtract(self): assert_equal(tda - tdb, tdc) assert_equal((tda - tdb).dtype, np.dtype('m8[h]')) assert_equal(tdb - tda, -tdc) - assert_equal((tdb - tda).dtype, np.dtype('m8[h]')) - # m8 - bool - assert_equal(tdc - True, tdc - 1) - assert_equal((tdc - True).dtype, np.dtype('m8[h]')) - # m8 - int - assert_equal(tdc - 3 * 24, -tdb) - assert_equal((tdc - 3 * 24).dtype, np.dtype('m8[h]')) - # int - m8 - assert_equal(False - tdb, -tdb) - assert_equal((False - tdb).dtype, np.dtype('m8[h]')) - # int - m8 - assert_equal(3 * 24 - tdb, tdc) - assert_equal((3 * 24 - tdb).dtype, np.dtype('m8[h]')) - # M8 - bool - assert_equal(dtb - True, dtb - 1) - assert_equal(dtnat - True, dtnat) - assert_equal((dtb - True).dtype, np.dtype('M8[D]')) - # M8 - int - assert_equal(dtb - 3, dta) - assert_equal(dtnat - 3, dtnat) - assert_equal((dtb - 3).dtype, np.dtype('M8[D]')) + assert_equal((tdb - tda).dtype, np.dtype("m8[h]")) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + # m8 - bool + assert_equal(tdc - True, tdc - 1) + assert_equal((tdc - True).dtype, np.dtype('m8[h]')) + # m8 - int + assert_equal(tdc - 3 * 24, -tdb) + assert_equal((tdc - 3 * 24).dtype, np.dtype('m8[h]')) + # int - m8 + assert_equal(False - tdb, -tdb) + assert_equal((False - tdb).dtype, np.dtype('m8[h]')) + # int - m8 + assert_equal(3 * 24 - tdb, tdc) + assert_equal((3 * 24 - tdb).dtype, np.dtype('m8[h]')) + # M8 - bool + 
assert_equal(dtb - True, dtb - 1) + assert_equal(dtnat - True, dtnat) + assert_equal((dtb - True).dtype, np.dtype('M8[D]')) + # M8 - int + assert_equal(dtb - 3, dta) + assert_equal(dtnat - 3, dtnat) + assert_equal((dtb - 3).dtype, np.dtype('M8[D]')) # M8 - m8 assert_equal(dtb - tda, dta) assert_equal(dtnat - tda, dtnat) @@ -1310,7 +1463,7 @@ def test_datetime_multiply(self): with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', "invalid value encountered in multiply", RuntimeWarning) - nat = np.timedelta64('NaT') + nat = np.timedelta64('NaT', 's') def check(a, b, res): assert_equal(a * b, res) @@ -1319,8 +1472,8 @@ def check(a, b, res): check(nat, tp(2), nat) check(nat, tp(0), nat) for f in (float('inf'), float('nan')): - check(np.timedelta64(1), f, nat) - check(np.timedelta64(0), f, nat) + check(np.timedelta64(1, "s"), f, nat) + check(np.timedelta64(0, "s"), f, nat) check(nat, f, nat) @pytest.mark.parametrize("op1, op2, exp", [ @@ -1340,34 +1493,37 @@ def check(a, b, res): (np.timedelta64(1, 'm'), np.timedelta64(31, 's'), 1), - # m8 generic units - (np.timedelta64(1890), - np.timedelta64(31), - 60), # Y // M works (np.timedelta64(2, 'Y'), np.timedelta64('13', 'M'), 1), # handle 1D arrays - (np.array([1, 2, 3], dtype='m8'), - np.array([2], dtype='m8'), + (np.array([1, 2, 3], dtype='m8[s]'), + np.array([2], dtype='m8[s]'), np.array([0, 1, 1], dtype=np.int64)), ]) def test_timedelta_floor_divide(self, op1, op2, exp): assert_equal(op1 // op2, exp) + def test_generic_timedelta_floor_divide(self): + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + assert_equal(np.timedelta64(1890) // np.timedelta64(31), 60) + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") @pytest.mark.parametrize("op1, op2", [ # div by 0 (np.timedelta64(10, 'us'), np.timedelta64(0, 'us')), # div with NaT - (np.timedelta64('NaT'), + (np.timedelta64('NaT', 'us'), np.timedelta64(50, 'us')), # special case for int64 min # in 
integer floor division - (np.timedelta64(np.iinfo(np.int64).min), - np.timedelta64(-1)), + (np.timedelta64(np.iinfo(np.int64).min, "s"), + np.timedelta64(-1, "s")), ]) def test_timedelta_floor_div_warnings(self, op1, op2): with pytest.warns(RuntimeWarning): @@ -1385,12 +1541,16 @@ def test_timedelta_floor_div_warnings(self, op1, op2): (9007199254740999, -2), ]) def test_timedelta_floor_div_precision(self, val1, val2): - op1 = np.timedelta64(val1) - op2 = np.timedelta64(val2) - actual = op1 // op2 - # Python reference integer floor - expected = val1 // val2 - assert_equal(actual, expected) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + op1 = np.timedelta64(val1) + op2 = np.timedelta64(val2) + actual = op1 // op2 + # Python reference integer floor + expected = val1 // val2 + assert_equal(actual, expected) @pytest.mark.parametrize("val1, val2", [ # years and months sometimes can't be unambiguously @@ -1417,20 +1577,31 @@ def test_timedelta_floor_div_error(self, val1, val2): # m8 different units (np.timedelta64(1, 'm'), np.timedelta64(31, 's')), - # m8 generic units - (np.timedelta64(1890), - np.timedelta64(31)), # Y // M works (np.timedelta64(2, 'Y'), np.timedelta64('13', 'M')), # handle 1D arrays - (np.array([1, 2, 3], dtype='m8'), - np.array([2], dtype='m8')), + (np.array([1, 2, 3], dtype='m8[ns]'), + np.array([2], dtype='m8[ns]')), ]) def test_timedelta_divmod(self, op1, op2): expected = (op1 // op2, op1 % op2) assert_equal(divmod(op1, op2), expected) + @pytest.mark.parametrize("op1, op2", [ + # m8 generic units + (1890, 31), + ]) + def test_generic_timedelta_divmod(self, op1, op2): + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + op1 = np.timedelta64(op1) + op2 = np.timedelta64(op2) + expected = (op1 // op2, op1 % op2) + assert_equal(divmod(op1, op2), expected) + @pytest.mark.parametrize("op1, op2", [ # Y and M are incompatible with all units except Y and M 
(np.timedelta64(1, 'Y'), np.timedelta64(1, 's')), @@ -1446,12 +1617,12 @@ def test_timedelta_divmod_typeerror(self, op1, op2): (np.timedelta64(10, 'us'), np.timedelta64(0, 'us')), # div with NaT - (np.timedelta64('NaT'), + (np.timedelta64('NaT', 'us'), np.timedelta64(50, 'us')), # special case for int64 min # in integer floor division - (np.timedelta64(np.iinfo(np.int64).min), - np.timedelta64(-1)), + (np.timedelta64(np.iinfo(np.int64).min, 'ns'), + np.timedelta64(-1, 'ns')), ]) def test_timedelta_divmod_warnings(self, op1, op2): with pytest.warns(RuntimeWarning): @@ -1511,7 +1682,12 @@ def test_datetime_divide(self): with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', r".*encountered in divide", RuntimeWarning) - nat = np.timedelta64('NaT') + warnings.filterwarnings( + "ignore", + self.generic_unit_deprecation_message, + DeprecationWarning, + ) + nat = np.timedelta64('NaT', 's') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) assert_equal(np.timedelta64(0) / tp(0), nat) @@ -1974,26 +2150,35 @@ def test_datetime_arange(self): np.array(['1950-02-10', '1950-02-09', '1950-02-08', '1950-02-07'], dtype='M8[D]')) - # Unit should be detected as months here - a = np.arange('1969-05', '1970-05', 2, dtype='M8') - assert_equal(a.dtype, np.dtype('M8[M]')) - assert_equal(a, - np.datetime64('1969-05') + np.arange(12, step=2)) - - # datetime, integer|timedelta works as well - # produces arange (start, start + stop) in this case - a = np.arange('1969', 18, 3, dtype='M8') - assert_equal(a.dtype, np.dtype('M8[Y]')) - assert_equal(a, - np.datetime64('1969') + np.arange(18, step=3)) - a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8') - assert_equal(a.dtype, np.dtype('M8[D]')) - assert_equal(a, - np.datetime64('1969-12-19') + np.arange(22, step=2)) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + # Unit should be detected as months here + a = np.arange("1969-05", "1970-05", 2, dtype="M8") + 
assert_equal(a.dtype, np.dtype('M8[M]')) + assert_equal(a, + np.datetime64('1969-05') + np.arange(12, step=2)) + + # datetime, integer|timedelta works as well + # produces arange (start, start + stop) in this case + a = np.arange('1969', 18, 3, dtype='M8') + assert_equal(a.dtype, np.dtype('M8[Y]')) + assert_equal(a, + np.datetime64('1969') + np.arange(18, step=3)) + a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8') + assert_equal(a.dtype, np.dtype('M8[D]')) + assert_equal(a, + np.datetime64('1969-12-19') + np.arange(22, step=2)) # Step of 0 is disallowed - assert_raises(ValueError, np.arange, np.datetime64('today'), - np.datetime64('today') + 3, 0) + assert_raises( + ValueError, + np.arange, + np.datetime64("today"), + np.datetime64("today") + np.timedelta64(3, "D"), + np.timedelta64(0, "D"), + ) # Promotion across nonlinear unit boundaries is disallowed assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'), np.timedelta64(5, 'M')) @@ -2003,21 +2188,26 @@ def test_datetime_arange(self): def test_datetime_arange_no_dtype(self): d = np.array('2010-01-04', dtype="M8[D]") - assert_equal(np.arange(d, d + 1), d) + assert_equal(np.arange(d, d + np.timedelta64(1, "D")), d) assert_raises(ValueError, np.arange, d) def test_timedelta_arange(self): - a = np.arange(3, 10, dtype='m8') - assert_equal(a.dtype, np.dtype('m8')) - assert_equal(a, np.timedelta64(0) + np.arange(3, 10)) - - a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8') + a = np.arange(3, 10, dtype='m8[s]') + assert_equal(a.dtype, np.dtype('m8[s]')) + assert_equal(a, np.timedelta64(0, "s") + np.arange(3, 10, dtype="m8[s]")) + + a = np.arange( + np.timedelta64(3, "s"), + np.timedelta64(10, "s"), + np.timedelta64(2, "s"), + dtype="m8", + ) assert_equal(a.dtype, np.dtype('m8[s]')) assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2)) # Step of 0 is disallowed - assert_raises(ValueError, np.arange, np.timedelta64(0), - np.timedelta64(5), 0) + assert_raises(ValueError, np.arange, 
np.timedelta64(0, 's'), + np.timedelta64(5, 's'), np.timedelta64(0, 's')) # Promotion across nonlinear unit boundaries is disallowed assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'), np.timedelta64(5, 'M')) @@ -2051,9 +2241,9 @@ def test_timedelta_arange(self): np.timedelta64(727, 'ns'), np.timedelta64(273, 'ns')), # NaT is propagated - (np.timedelta64('NaT'), + (np.timedelta64('NaT', 'ns'), np.timedelta64(50, 'ns'), - np.timedelta64('NaT')), + np.timedelta64('NaT', 'ns')), # Y % M works (np.timedelta64(2, 'Y'), np.timedelta64(22, 'M'), @@ -2078,7 +2268,7 @@ def test_timedelta_modulus_error(self, val1, val2): def test_timedelta_modulus_div_by_zero(self): with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') - assert_equal(actual, np.timedelta64('NaT')) + assert_equal(actual, np.timedelta64('NaT', 's')) @pytest.mark.parametrize("val1, val2", [ # cases where one operand is not @@ -2097,8 +2287,8 @@ def test_timedelta_modulus_type_resolution(self, val1, val2): def test_timedelta_arange_no_dtype(self): d = np.array(5, dtype="m8[D]") - assert_equal(np.arange(d, d + 1), d) - assert_equal(np.arange(d), np.arange(0, d)) + assert_equal(np.arange(d, d + np.timedelta64(1, "D")), d) + assert_equal(np.arange(d), np.arange(np.timedelta64(0, "D"), d)) def test_datetime_maximum_reduce(self): a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]') @@ -2186,12 +2376,12 @@ def test_datetime_busday_offset(self): np.datetime64('2007-02-25')) # NaT values when roll is not raise - assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'), - np.datetime64('NaT')) - assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'), - np.datetime64('NaT')) - assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'), - np.datetime64('NaT')) + assert_equal(np.busday_offset(np.datetime64('NaT', 'D'), 1, roll='nat'), + np.datetime64('NaT', 'D')) + assert_equal(np.busday_offset(np.datetime64('NaT', 'D'), 
1, roll='following'), + np.datetime64('NaT', 'D')) + assert_equal(np.busday_offset(np.datetime64('NaT', 'D'), 1, roll='preceding'), + np.datetime64('NaT', 'D')) def test_datetime_busdaycalendar(self): # Check that it removes NaT, duplicates, and weekends @@ -2212,6 +2402,11 @@ def test_datetime_busdaycalendar(self): bdd = np.busdaycalendar(weekmask="0011001") assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?')) + # Check length 7 bool array. + mask = np.array([False, True, True, True, True, False, False]) + bdd = np.busdaycalendar(weekmask=mask) + assert_equal(bdd.weekmask, mask, strict=True) + # Check length 7 string weekmask. bdd = np.busdaycalendar(weekmask="Mon Tue") assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?')) @@ -2439,9 +2634,9 @@ def test_datetime_busday_holidays_count(self): assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4) sunday = np.datetime64('2023-03-05') - monday = sunday + 1 - friday = sunday + 5 - saturday = sunday + 6 + monday = sunday + np.timedelta64(1, 'D') + friday = sunday + np.timedelta64(5, 'D') + saturday = sunday + np.timedelta64(6, 'D') assert_equal(np.busday_count(sunday, monday), 0) assert_equal(np.busday_count(monday, sunday), -1) @@ -2541,7 +2736,7 @@ def test_isfinite_isinf_isnan_units(self, unit, dstr): def test_assert_equal(self): assert_raises(AssertionError, assert_equal, - np.datetime64('nat'), np.timedelta64('nat')) + np.datetime64('nat', 's'), np.timedelta64('nat', 's')) def test_corecursive_input(self): # construct a co-recursive list @@ -2605,12 +2800,20 @@ def test_limit_str_roundtrip(self, time_unit, sign): limit_via_str = np.datetime64(str(limit), time_unit) assert limit_via_str == limit + def test_cast_to_truncated_string_doesnt_overflow(self): + a = np.array([1, -2, 1], dtype='timedelta64[D]') + assert_array_equal(a.astype('U1'), ['1', '-', '1']) + def test_datetime_hash_nat(self): - nat1 = np.datetime64() - nat2 = np.datetime64() - assert nat1 is not 
nat2 - assert nat1 != nat2 - assert hash(nat1) != hash(nat2) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) def test_datetime_hash_weeks(self, unit): @@ -2659,7 +2862,11 @@ def test_datetime_hash_big_positive(self, wk, unit): _assert_equal_hash(dt, dt2) def test_timedelta_hash_generic(self): - assert_raises(ValueError, hash, np.timedelta64(123)) # generic + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message, + ): + assert_raises(ValueError, hash, np.timedelta64(123)) # generic @pytest.mark.parametrize('unit', ('Y', 'M')) def test_timedelta_hash_year_month(self, unit): @@ -2748,6 +2955,17 @@ def test_true_divide_object_by_timedelta( results = inputs / divisor assert_array_equal(results, expected) + @pytest.mark.parametrize( + "atol", [np.timedelta64(1, "s"), np.timedelta64(1, "ms")] + ) + def test_assert_all_close_with_timedelta_atol( + self, atol: np.timedelta64 | datetime.timedelta + ): + # gh-30382 + a = np.array([1, 2], dtype="m8[s]") + b = np.array([3, 4], dtype="m8[s]") + with pytest.raises(AssertionError): + np.testing.assert_allclose(a, b, atol=atol) class TestDateTimeData: diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index e98632b62829..33db8747e0e6 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -13,6 +13,12 @@ kw_unicode_true = {'unicode': True} # make 2to3 work properly kw_unicode_false = {'unicode': False} +ignore_charray_deprecation = pytest.mark.filterwarnings( + r"ignore:\w+ (chararray|array|asarray) \w+:DeprecationWarning" +) + + +@ignore_charray_deprecation class TestBasic: def test_from_object_array(self): A = np.array([['abc', 2], @@ -134,6 +140,7 @@ def 
fail(): assert_raises(ValueError, fail) +@ignore_charray_deprecation class TestWhitespace: def test1(self): A = np.array([['abc ', '123 '], @@ -147,12 +154,14 @@ def test1(self): assert_(not np.any(A < B)) assert_(not np.any(A != B)) +@ignore_charray_deprecation class TestChar: def test_it(self): A = np.array('abc1', dtype='c').view(np.char.chararray) assert_equal(A.shape, (4,)) assert_equal(A.upper()[:2].tobytes(), b'AB') +@ignore_charray_deprecation class TestComparisons: def A(self): return np.array([['abc', 'abcc', '123'], @@ -199,6 +208,7 @@ def test_type(self): assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) +@ignore_charray_deprecation class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" @@ -207,6 +217,7 @@ def B(self): [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) +@ignore_charray_deprecation class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" @@ -215,6 +226,7 @@ def A(self): [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) +@ignore_charray_deprecation class TestInformation: def A(self): return np.array([[' abc ', ''], @@ -309,12 +321,18 @@ def test_islower(self): def test_isspace(self): A = self.A() assert_(issubclass(A.isspace().dtype.type, np.bool)) - assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) + assert_array_equal( + A.isspace(), + [[False, False], [False, False], [False, False]], + ) def test_istitle(self): A = self.A() assert_(issubclass(A.istitle().dtype.type, np.bool)) - assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) + assert_array_equal( + A.istitle(), + [[False, False], [False, False], [False, False]], + ) def test_isupper(self): A = self.A() @@ -350,6 +368,7 @@ def fail(): assert_raises(TypeError, fail) +@ignore_charray_deprecation class TestMethods: def A(self): return np.array([[' abc ', ''], @@ -685,6 +704,7 @@ def fail(): 
assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) +@ignore_charray_deprecation class TestOperations: def A(self): return np.array([['abc', '123'], @@ -850,6 +870,7 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') +@ignore_charray_deprecation def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 7cb1fee9b890..0a579957e214 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -5,11 +5,11 @@ """ import contextlib import warnings +from collections.abc import Callable import pytest import numpy as np -import numpy._core._struct_ufunc_tests as struct_ufunc from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 from numpy.testing import assert_raises @@ -87,9 +87,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: - raise AssertionError( - "expected %s but got: %s" % - (self.warning_cls.__name__, warning.category)) + name = self.warning_cls.__name__ + raise AssertionError(f"expected {name} but got: {warning.category}") if num is not None and num_found != num: msg = f"{len(w_context)} warnings found but {num} expected." 
lst = [str(w) for w in w_context] @@ -137,59 +136,6 @@ def foo(): test_case_instance.assert_deprecated(foo) -class TestBincount(_DeprecationTestCase): - # 2024-07-29, 2.1.0 - @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], - ['0', '1', '1']]) - def test_bincount_bad_list(self, badlist): - self.assert_deprecated(lambda: np.bincount(badlist)) - - -class BuiltInRoundComplexDType(_DeprecationTestCase): - # 2020-03-31 1.19.0 - deprecated_types = [np.csingle, np.cdouble, np.clongdouble] - not_deprecated_types = [ - np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64, - np.float16, np.float32, np.float64, - ] - - def test_deprecated(self): - for scalar_type in self.deprecated_types: - scalar = scalar_type(0) - self.assert_deprecated(round, args=(scalar,)) - self.assert_deprecated(round, args=(scalar, 0)) - self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) - - def test_not_deprecated(self): - for scalar_type in self.not_deprecated_types: - scalar = scalar_type(0) - self.assert_not_deprecated(round, args=(scalar,)) - self.assert_not_deprecated(round, args=(scalar, 0)) - self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) - - -class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): - # NumPy 1.20, 2020-09-03 - message = "concatenate with `axis=None` will use same-kind casting" - - def test_deprecated(self): - self.assert_deprecated(np.concatenate, - args=(([0.], [1.]),), - kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64)}) - - def test_not_deprecated(self): - self.assert_not_deprecated(np.concatenate, - args=(([0.], [1.]),), - kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), - 'casting': "unsafe"}) - - with assert_raises(TypeError): - # Tests should notice if the deprecation warning is given first... 
- np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), - casting="same_kind") - - class TestCtypesGetter(_DeprecationTestCase): ctypes = np.array([1]).ctypes @@ -260,57 +206,17 @@ def test_attributeerror_includes_info(self, name): getattr(np, name) -class TestDeprecatedFinfo(_DeprecationTestCase): - # Deprecated in NumPy 1.25, 2023-01-16 - def test_deprecated_none(self): - self.assert_deprecated(np.finfo, args=(None,)) - - -class TestMathAlias(_DeprecationTestCase): - def test_deprecated_np_lib_math(self): - self.assert_deprecated(lambda: np.lib.math) - - -class TestLibImports(_DeprecationTestCase): - # Deprecated in Numpy 1.26.0, 2023-09 - def test_lib_functions_deprecation_call(self): - from numpy import row_stack - from numpy._core.numerictypes import maximum_sctype - from numpy.lib._npyio_impl import recfromcsv, recfromtxt - from numpy.lib._shape_base_impl import get_array_wrap - from numpy.lib._utils_impl import safe_eval - from numpy.lib.tests.test_io import TextIO - - self.assert_deprecated(lambda: safe_eval("None")) - - data_gen = lambda: TextIO('A,B\n0,1\n2,3') - kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True} - self.assert_deprecated(lambda: recfromcsv(data_gen())) - self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - - self.assert_deprecated(get_array_wrap) - self.assert_deprecated(lambda: maximum_sctype(int)) - - self.assert_deprecated(lambda: row_stack([[]])) - self.assert_deprecated(lambda: np.chararray) +class TestCharArray(_DeprecationTestCase): + def test_deprecated_chararray(self): + self.assert_deprecated(lambda: np.char.chararray) class TestDeprecatedDTypeAliases(_DeprecationTestCase): - - def _check_for_warning(self, func): - with pytest.warns(DeprecationWarning, - match="alias 'a' was deprecated in NumPy 2.0") as w: - func() - assert len(w) == 1 - - def test_a_dtype_alias(self): - for dtype in ["a", "a10"]: - f = lambda: np.dtype(dtype) - self._check_for_warning(f) - self.assert_deprecated(f) - f = 
lambda: np.array(["hello", "world"]).astype("a10") - self._check_for_warning(f) - self.assert_deprecated(f) + @pytest.mark.parametrize("dtype_code", ["a", "a10"]) + def test_a_dtype_alias(self, dtype_code: str): + # Deprecated in 2.0, removed in 2.5, 2025-12 + with pytest.raises(TypeError): + np.dtype(dtype_code) class TestDeprecatedArrayWrap(_DeprecationTestCase): @@ -337,6 +243,7 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called + class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): message = "Setting the .*on a NumPy array has been deprecated.*" @@ -344,6 +251,33 @@ def test_deprecated_strides_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + def test_deprecated_dtype_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, "dtype", int)) + + def test_deprecated_shape_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, "shape", (4, 1))) + + +class TestDeprecatedViewDtypePropertySetter(_DeprecationTestCase): + # view() with dtype change on a subclass that overrides the + # dtype property should warn to implement _set_dtype instead. 
+ message = r"numpy.ndarray.view\(\) used a custom `dtype` setter.*" + + def test_view_dtype_property_setter(self): + class MyArray(np.ndarray): + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MyArray, type(self))._set_dtype(self, dtype) + + arr = np.arange(6).view(MyArray) + self.assert_deprecated(arr.view, args=(np.float64,)) + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" @@ -353,26 +287,6 @@ def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) -class TestAddNewdocUFunc(_DeprecationTestCase): - # Deprecated in Numpy 2.2, 2024-11 - @pytest.mark.thread_unsafe( - reason="modifies and checks docstring which is global state" - ) - def test_deprecated(self): - doc = struct_ufunc.add_triplet.__doc__ - # gh-26718 - # This test mutates the C-level docstring pointer for add_triplet, - # which is permanent once set. Skip when re-running tests. - if doc is not None and "new docs" in doc: - pytest.skip("Cannot retest deprecation, otherwise ValueError: " - "Cannot change docstring of ufunc with non-NULL docstring") - self.assert_deprecated( - lambda: np._core.umath._add_newdoc_ufunc( - struct_ufunc.add_triplet, "new docs" - ) - ) - - class TestDTypeAlignBool(_VisibleDeprecationTestCase): # Deprecated in Numpy 2.4, 2025-07 # NOTE: As you can see, finalizing this deprecation breaks some (very) old @@ -386,11 +300,18 @@ def test_deprecated(self): # alignment, or pass them accidentally as a subarray shape (meaning to pass # a tuple). 
self.assert_deprecated(lambda: np.dtype("f8", align=3)) + self.assert_deprecated(lambda: np.dtype("f8", align=0, copy=10**100)) + self.assert_deprecated(lambda: np.dtype("f8", align=10**100, copy=0)) + # Subclasses of ints don't hit the below pickle code path: + self.assert_deprecated( + lambda: np.dtype("f8", align=np.long(0), copy=np.long(1))) @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) def test_not_deprecated(self, align): # if the user passes a bool, it is accepted. self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + # The following specific case is used by old pickles: + self.assert_not_deprecated(lambda: np.dtype("f8", align=0, copy=1)) class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): @@ -456,5 +377,101 @@ class TestTooManyArgsExtremum(_DeprecationTestCase): message = "Passing more than 2 positional arguments to np.maximum and np.minimum " @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) - def test_extremem_3_args(self, ufunc): + def test_extremum_3_args(self, ufunc): self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) + + +class TestTypenameDeprecation(_DeprecationTestCase): + # Deprecation in Numpy 2.5, 2026-02 + + def test_typename_emits_deprecation_warning(self): + self.assert_deprecated(lambda: np.typename("S1")) + self.assert_deprecated(lambda: np.typename("h")) + +class TestRoundDeprecation(_DeprecationTestCase): + # Deprecation in NumPy 2.5, 2026-02 + + def test_round_emits_deprecation_warning_array(self): + a = np.array([1.5, 2.7, -1.5, -2.7]) + self.assert_deprecated(lambda: np.ma.round_(a)) + + def test_round_emits_deprecation_warning_scalar(self): + self.assert_deprecated(lambda: np.ma.round_(3.14)) + + +class TestDeprecatedGenericTimedelta(_DeprecationTestCase): + # Deprecated in Numpy 2.5, 2025-11 + # See gh-29619 + message = "The 'generic' unit for NumPy timedelta is deprecated" + + @pytest.mark.parametrize('value', [ + 3, 10, "NaT" + ]) + def 
test_raise_warning_for_timedelta_with_generic_unit(self, value: int | str): + self.assert_deprecated(lambda x: np.timedelta64(x), args=(value,)) + + @pytest.mark.parametrize('value', [ + np.timedelta64(3, "s"), np.timedelta64(10, "D") + ]) + @pytest.mark.parametrize('generic_value', [ + 5, 2 + ]) + @pytest.mark.parametrize( + "op", + [ + np.add, + np.subtract, + ], + ) + def test_raise_warning_for_operation_with_generic_unit( + self, value: int, generic_value: int, op: Callable + ): + self.assert_deprecated(op, args=(value, generic_value)) + + def test_raise_warning_for_default_constructor(self): + self.assert_deprecated(lambda: np.timedelta64()) + self.assert_deprecated(lambda: np.datetime64()) + + def test_raise_warning_for_NAT_construction(self): + self.assert_deprecated(lambda: np.datetime64('NaT')) + self.assert_deprecated(lambda: np.datetime64(None)) + + +class TestTriDeprecationWithNonInteger(_DeprecationTestCase): + # Deprecation in NumPy 2.5, 2026-03 + + def test_tri(self): + self.assert_deprecated(lambda: np.tri(M=2.3, k=3.14, N=np.object_(8))) + + def test_triu_indices(self): + self.assert_deprecated(lambda: np.triu_indices(n=np.float64(7.14), k=3.2)) + self.assert_deprecated(lambda: np.triu_indices(n=4, k=np.bool(0))) + + def test_tril_indices(self): + self.assert_deprecated(lambda: np.tril_indices(n=np.array(3.14))) + + def test_triu_indices_from(self): + a = np.array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + self.assert_deprecated(lambda: np.triu_indices_from(a, k=np.object_(9.8))) + + def test_tril_indices_from(self): + a = np.array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + self.assert_deprecated(lambda: np.tril_indices_from(a, k=9.8)) + + +class TestTakeOutDtype(_DeprecationTestCase): + # Deprecated in Numpy 2.5, 2026-01 + message = "Implicit casting of output to a different kind." 
+ + def test_out_dtype_deprecated(self): + a = np.arange(3).astype(np.int32) + indices = np.arange(2) + different_dtype_out = np.zeros_like(indices, dtype=np.uint32) + + self.assert_deprecated(lambda: np.take(a, indices, out=different_dtype_out)) diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 89c24032b6c1..239f34559cef 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -3,7 +3,7 @@ import pytest import numpy as np -from numpy.testing import IS_PYPY, assert_array_equal +from numpy.testing import assert_array_equal def new_and_old_dlpack(): @@ -18,7 +18,6 @@ def __dlpack__(self, stream=None): class TestDLPack: - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) @@ -54,7 +53,6 @@ def test_strides_not_multiple_of_itemsize(self): with pytest.raises(BufferError): np.from_dlpack(z) - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") @pytest.mark.parametrize("arr", new_and_old_dlpack()) def test_from_dlpack_refcount(self, arr): arr = arr.copy() @@ -184,7 +182,7 @@ def test_device(self): np.from_dlpack(x, device="cpu") np.from_dlpack(x, device=None) - with pytest.raises(ValueError): + with pytest.raises(BufferError): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 8819262c4e21..6464ccd61f9d 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,10 +1,12 @@ +import contextlib import ctypes -import gc import inspect import operator +import os import pickle import sys import types +import warnings from itertools import permutations from typing import Any @@ -19,14 +21,12 @@ from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, - IS_PYPY, - IS_PYSTON, - IS_WASM, assert_, 
assert_array_equal, assert_equal, assert_raises, ) +from numpy.testing._private.utils import requires_deep_recursion def assert_dtype_equal(a, b): @@ -779,7 +779,7 @@ def test_subarray_cast_copies(self): arr = np.ones(3, dtype=[("f", "i", 3)]) cast = arr.astype(object) for fields in cast: - assert type(fields) == tuple and len(fields) == 1 + assert type(fields) is tuple and len(fields) == 1 subarr = fields[0] assert subarr.base is None assert subarr.flags.owndata @@ -822,115 +822,6 @@ def iter_struct_object_dtypes(): yield pytest.param(dt, p, 12, obj, id="") -@pytest.mark.skipif( - sys.version_info >= (3, 12), - reason="Python 3.12 has immortal refcounts, this test will no longer " - "work. See gh-23986" -) -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -class TestStructuredObjectRefcounting: - """These tests cover various uses of complicated structured types which - include objects and thus require reference counting. - """ - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - @pytest.mark.parametrize(["creation_func", "creation_obj"], [ - pytest.param(np.empty, None, - # None is probably used for too many things - marks=pytest.mark.skip("unreliable due to python's behaviour")), - (np.ones, 1), - (np.zeros, 0)]) - def test_structured_object_create_delete(self, dt, pat, count, singleton, - creation_func, creation_obj): - """Structured object reference counting in creation and deletion""" - # The test assumes that 0, 1, and None are singletons. 
- gc.collect() - before = sys.getrefcount(creation_obj) - arr = creation_func(3, dt) - - now = sys.getrefcount(creation_obj) - assert now - before == count * 3 - del arr - now = sys.getrefcount(creation_obj) - assert now == before - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - def test_structured_object_item_setting(self, dt, pat, count, singleton): - """Structured object reference counting for simple item setting""" - one = 1 - - gc.collect() - before = sys.getrefcount(singleton) - arr = np.array([pat] * 3, dt) - assert sys.getrefcount(singleton) - before == count * 3 - # Fill with `1` and check that it was replaced correctly: - before2 = sys.getrefcount(one) - arr[...] = one - after2 = sys.getrefcount(one) - assert after2 - before2 == count * 3 - del arr - gc.collect() - assert sys.getrefcount(one) == before2 - assert sys.getrefcount(singleton) == before - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - @pytest.mark.parametrize( - ['shape', 'index', 'items_changed'], - [((3,), ([0, 2],), 2), - ((3, 2), ([0, 2], slice(None)), 4), - ((3, 2), ([0, 2], [1]), 2), - ((3,), ([True, False, True]), 2)]) - def test_structured_object_indexing(self, shape, index, items_changed, - dt, pat, count, singleton): - """Structured object reference counting for advanced indexing.""" - # Use two small negative values (should be singletons, but less likely - # to run into race-conditions). This failed in some threaded envs - # When using 0 and 1. If it fails again, should remove all explicit - # checks, and rely on `pytest-leaks` reference count checker only. 
- val0 = -4 - val1 = -5 - - arr = np.full(shape, val0, dt) - - gc.collect() - before_val0 = sys.getrefcount(val0) - before_val1 = sys.getrefcount(val1) - # Test item getting: - part = arr[index] - after_val0 = sys.getrefcount(val0) - assert after_val0 - before_val0 == count * items_changed - del part - # Test item setting: - arr[index] = val1 - gc.collect() - after_val0 = sys.getrefcount(val0) - after_val1 = sys.getrefcount(val1) - assert before_val0 - after_val0 == count * items_changed - assert after_val1 - before_val1 == count * items_changed - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): - """Structured object reference counting for specialized functions. - The older functions such as take and repeat use different code paths - then item setting (when writing this). - """ - indices = [0, 1] - - arr = np.array([pat] * 3, dt) - gc.collect() - before = sys.getrefcount(singleton) - res = arr.take(indices) - after = sys.getrefcount(singleton) - assert after - before == count * 2 - new = res.repeat(10) - gc.collect() - after_repeat = sys.getrefcount(singleton) - assert after_repeat - after == count * 2 * 10 - - class TestStructuredDtypeSparseFields: """Tests subarray fields which contain sparse dtypes so that not all memory is used by the dtype work. 
Such dtype's should @@ -977,25 +868,24 @@ def test1(self): ('yi', np.dtype((a, (3, 2))))]) assert_dtype_equal(c, d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_list_recursion(self): l = [] l.append(('f', l)) with pytest.raises(RecursionError): np.dtype(l) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_tuple_recursion(self): d = np.int32 for i in range(100000): d = (d, (1,)) - with pytest.raises(RecursionError): + # depending on OS and Python version, this might succeed + # see gh-30370 and cpython issue #142253 + with contextlib.suppress(RecursionError): np.dtype(d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_dict_recursion(self): d = {"names": ['self'], "formats": [None], "offsets": [0]} d['formats'][0] = d @@ -1227,7 +1117,10 @@ def test_zero_stride(self): arr = np.broadcast_to(arr, 10) assert arr.strides == (0,) with pytest.raises(ValueError): - arr.dtype = "i1" + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings(action="ignore", + category=DeprecationWarning) + arr.dtype = "i1" class TestDTypeMakeCanonical: def check_canonical(self, dtype, canonical): @@ -1310,7 +1203,7 @@ def test_simple(self): def test_object_flag_not_inherited(self): # The following dtype still indicates "object", because its included - # in the unaccessible space (maybe this could change at some point): + # in the inaccessible space (maybe this could change at some point): arr = np.ones(3, "i,O,i")[["f0", "f2"]] assert arr.dtype.hasobject canonical_dt = np.result_type(arr.dtype) @@ -1452,6 +1345,29 @@ def test_pickle_dtype(self, dt): assert 
roundtrip_dt == dt assert hash(roundtrip_dt) == pre_pickle_hash + @pytest.mark.parametrize('dt', [ + np.dtype([('a', 'i4'), ('b', 'f8')]), + np.dtype('i4, i1', align=True), + ]) + def test_setstate_invalid_tuple_size(self, dt): + # gh-30476 + valid_state = dt.__reduce__()[2] + dt.__setstate__(valid_state) + + for size in [1, 2, 3, 4]: + with pytest.raises( + ValueError, match="Invalid state while unpickling" + ): + dt.__setstate__(valid_state[:size]) + + min_extra = 10 - len(valid_state) + for extra in range(min_extra, min_extra + 5): + extended = valid_state + (None,) * extra + with pytest.raises( + ValueError, match="Invalid state while unpickling" + ): + dt.__setstate__(extended) + class TestPromotion: """Test cases related to more complex DType promotions. Further promotion @@ -1607,6 +1523,7 @@ class dt: with pytest.raises(ValueError): np.dtype(dt_instance) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_void_subtype(self): class dt(np.void): # This code path is fully untested before, so it is unclear @@ -1836,7 +1753,12 @@ class Union(ctypes.Union): ] expected = np.dtype({ "names": ['a', 'b', 'c', 'd'], - "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + "formats": [ + 'u1', + np.uint16, + np.uint32, + [('one', 'u1'), ('two', np.uint32)], + ], "offsets": [0, 0, 0, 0], "itemsize": ctypes.sizeof(Union) }) @@ -1860,7 +1782,12 @@ class Union(ctypes.Union): ] expected = np.dtype({ "names": ['a', 'b', 'c', 'd'], - "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + "formats": [ + 'u1', + np.uint16, + np.uint32, + [('one', 'u1'), ('two', np.uint32)], + ], "offsets": [0, 0, 0, 0], "itemsize": ctypes.sizeof(Union) }) @@ -1892,7 +1819,15 @@ class PackedStructure(ctypes.Structure): ('g', ctypes.c_uint8) ] expected = np.dtype({ - "formats": [np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8], + "formats": [ + np.uint8, + np.uint16, + np.uint8, + 
np.uint16, + np.uint32, + np.uint32, + np.uint8, + ], "offsets": [0, 2, 4, 6, 8, 12, 16], "names": ['a', 'b', 'c', 'd', 'e', 'f', 'g'], "itemsize": 18}) @@ -1965,7 +1900,10 @@ def test_pairs(self, pair): class TestUserDType: @pytest.mark.leaks_references(reason="dynamically creates custom dtype.") - @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") + @pytest.mark.thread_unsafe( + reason="crashes when GIL disabled, dtype setup is thread-unsafe", + ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_custom_structured_dtype(self): class mytype: pass @@ -1986,7 +1924,10 @@ class mytype: del a assert sys.getrefcount(o) == startcount - @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") + @pytest.mark.thread_unsafe( + reason="crashes when GIL disabled, dtype setup is thread-unsafe", + ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_custom_structured_dtype_errors(self): class mytype: pass @@ -2034,9 +1975,13 @@ def test_subscript_scalar(self) -> None: def test_result_type_integers_and_unitless_timedelta64(): # Regression test for gh-20077. The following call of `result_type` # would cause a seg. fault. 
- td = np.timedelta64(4) - result = np.result_type(0, td) - assert_dtype_equal(result, td.dtype) + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) def test_creating_dtype_with_dtype_class_errors(): @@ -2046,7 +1991,6 @@ def test_creating_dtype_with_dtype_class_errors(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") class TestDTypeSignatures: def test_signature_dtype(self): sig = inspect.signature(np.dtype) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 375ef03c1dd7..6f4d38b8a97a 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -778,15 +778,21 @@ def __mul__(self, other): assert np.einsum("i,j", objMult, objMult) == 42 def test_subscript_range(self): - # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used - # when creating a subscript from arrays + # Issue #7741, make sure that all letters of Latin alphabet + # (both uppercase & lowercase) can be used when creating a subscript from arrays a = np.ones((2, 3)) b = np.ones((3, 4)) np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) - assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) - assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + assert_raises( + ValueError, + lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False), + ) + assert_raises( + ValueError, + lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False), + ) def test_einsum_broadcast(self): # Issue #2455 change in handling ellipsis @@ -802,7 +808,8 @@ def 
test_einsum_broadcast(self): for opt in [True, False]: assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) A = np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) @@ -810,8 +817,9 @@ def test_einsum_broadcast(self): for opt in [True, False]: assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) - assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) @@ -819,16 +827,17 @@ def test_einsum_broadcast(self): ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) for opt in [True, False]: assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) - assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + # used to raise error + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = np.arange(J * K * M * 3).reshape(J, K, M, 3) ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) for opt in [True, False]: - assert_equal(np.einsum('...lmn,lmno->...o', A, B, - optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum("...lmn,lmno->...o", A, B, optimize=opt), ref) def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug @@ -1175,7 +1184,7 @@ def 
assert_path_equal(self, comp, benchmark): ret &= (comp[pos + 1] == benchmark[pos + 1]) assert_(ret) - def test_memory_contraints(self): + def test_memory_constraints(self): # Ensure memory constraints are satisfied outer_test = self.build_operands('a,b,c->abc') diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index c6e10397b3ff..b78c79a6f032 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -21,7 +21,6 @@ from numpy._core import sctypes from numpy._core.function_base import add_newdoc from numpy.testing import ( - IS_PYPY, assert_, assert_allclose, assert_array_equal, @@ -487,7 +486,6 @@ def test_any_step_zero_and_not_mult_inplace(self): class TestAdd_newdoc: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the array." 
diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 4e911b89e89f..786fd1d494e4 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -83,6 +83,9 @@ class NonHashableWithDtype: x = NonHashableWithDtype() assert np.finfo(x) == np.finfo(x.dtype) + def test_no_none_sense(self): + assert_raises(TypeError, finfo, None) + class TestIinfo: def test_basic(self): diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 3ced5b466a44..85ef0f22b3f1 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -298,19 +298,15 @@ def test_half_correctness(self): if len(a32_fail) != 0: bad_index = a32_fail[0] assert_equal(finite_f32, a_manual, - "First non-equal is half value 0x%x -> %g != %g" % - (a_bits[bad_index], - finite_f32[bad_index], - a_manual[bad_index])) + f"First non-equal is half value 0x{a_bits[bad_index]:x} -> " + f"{finite_f32[bad_index]:g} != {a_manual[bad_index]:g}") a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] assert_equal(finite_f64, a_manual, - "First non-equal is half value 0x%x -> %g != %g" % - (a_bits[bad_index], - finite_f64[bad_index], - a_manual[bad_index])) + f"First non-equal is half value 0x{a_bits[bad_index]:x} -> " + f"{finite_f64[bad_index]:g} != {a_manual[bad_index]:g}") def test_half_ordering(self): """Make sure comparisons are working right""" diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 25a7158aaf6f..6c9631b8ebfb 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,36 +1,156 @@ import random +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor import pytest -from numpy._core._multiarray_tests import identityhash_tester +from numpy._core._multiarray_tests import ( + create_identity_hash, + identity_hash_get_item, + 
identity_hash_set_item_default, +) +from numpy.testing import IS_WASM @pytest.mark.parametrize("key_length", [1, 3, 6]) @pytest.mark.parametrize("length", [1, 16, 2000]) -def test_identity_hashtable(key_length, length): - # use a 30 object pool for everything (duplicates will happen) - pool = [object() for i in range(20)] +def test_identity_hashtable_get_set(key_length, length): + # no collisions expected keys_vals = [] for i in range(length): - keys = tuple(random.choices(pool, k=key_length)) - keys_vals.append((keys, random.choice(pool))) + keys = tuple(object() for _ in range(key_length)) + keys_vals.append((keys, object())) - dictionary = dict(keys_vals) + ht = create_identity_hash(key_length) - # add a random item at the end: - keys_vals.append(random.choice(keys_vals)) - # the expected one could be different with duplicates: - expected = dictionary[keys_vals[-1][0]] + for i in range(length): + key, value = keys_vals[i] + assert identity_hash_set_item_default(ht, key, value) is value + + for key, value in keys_vals: + got = identity_hash_get_item(ht, key) + assert got is value + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_default_thread_safety(key_length): + ht = create_identity_hash(key_length) + + key = tuple(object() for _ in range(key_length)) + val1 = object() + val2 = object() + + got1 = identity_hash_set_item_default(ht, key, val1) + assert got1 is val1 + + def thread_func(val): + return identity_hash_set_item_default(ht, key, val) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func, val2) for _ in range(8)] + results = [f.result() for f in futures] + + assert all(r is val1 for r in results) + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_set_thread_safety(key_length): + ht = 
create_identity_hash(key_length) + + key = tuple(object() for _ in range(key_length)) + val1 = object() + + def thread_func(val): + return identity_hash_set_item_default(ht, key, val) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func, val1) for _ in range(100)] + results = [f.result() for f in futures] + + assert all(r is val1 for r in results) + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_get_thread_safety(key_length): + ht = create_identity_hash(key_length) + key = tuple(object() for _ in range(key_length)) + value = object() + identity_hash_set_item_default(ht, key, value) + + def thread_func(): + return identity_hash_get_item(ht, key) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func) for _ in range(100)] + results = [f.result() for f in futures] + + assert all(r is value for r in results) + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +@pytest.mark.parametrize("length", [1 << 4, 1 << 8, 1 << 12]) +def test_identity_hashtable_get_set_concurrent(key_length, length): + ht = create_identity_hash(key_length) + keys_vals = [] + for i in range(length): + keys = tuple(object() for _ in range(key_length)) + keys_vals.append((keys, object())) + + def set_item(kv): + key, value = kv + got = identity_hash_set_item_default(ht, key, value) + assert got is value + + def get_item(kv): + key, value = kv + got = identity_hash_get_item(ht, key) + assert got is None or got is value + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [] + for kv in keys_vals: + futures.append(executor.submit(set_item, kv)) + futures.append(executor.submit(get_item, kv)) + for future in futures: + future.result() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") 
+@pytest.mark.parametrize("key_length", [3, 6, 10]) +@pytest.mark.parametrize("length", [1 << 4, 1 << 8, 1 << 12]) +def test_identity_hashtable_get_set_concurrent_collisions(key_length, length): + ht = create_identity_hash(key_length) + base_key = tuple(object() for _ in range(key_length - 1)) + keys_vals = defaultdict(list) + for i in range(length): + keys = base_key + (random.choice(base_key), ) + keys_vals[keys].append(object()) + + set_item_results = defaultdict(set) + + def set_item(kv): + key, values = kv + value = random.choice(values) + got = identity_hash_set_item_default(ht, key, value) + set_item_results[key].add(got) + + get_item_results = defaultdict(set) - res = identityhash_tester(key_length, keys_vals, replace=True) - assert res is expected + def get_item(kv): + key, values = kv + got = identity_hash_get_item(ht, key) + get_item_results[key].add(got) - if length == 1: - return + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [] + for keys, values in keys_vals.items(): + futures.append(executor.submit(set_item, (keys, values))) + futures.append(executor.submit(get_item, (keys, values))) + for future in futures: + future.result() - # add a new item with a key that is already used and a new value, this - # should error if replace is False, see gh-26690 - new_key = (keys_vals[1][0], object()) - keys_vals[0] = new_key - with pytest.raises(RuntimeError): - identityhash_tester(key_length, keys_vals) + for key in keys_vals.keys(): + assert len(set_item_results[key]) == 1 + set_item_value = set_item_results[key].pop() + for r in get_item_results[key]: + assert r is None or r is set_item_value diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 65d42d6c9370..7a8cd42c59aa 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -12,7 +12,6 @@ from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, 
assert_, assert_array_equal, assert_equal, @@ -1677,7 +1676,6 @@ def test_nonempty_string_flat_index_on_flatiter(self): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize("methodname", ["__array__", "copy"]) def test_flatiter_method_signatures(methodname: str): method = getattr(np.flatiter, methodname) diff --git a/numpy/_core/tests/test_item_selection.py b/numpy/_core/tests/test_item_selection.py index 79fb82dde591..2d9caaf8061c 100644 --- a/numpy/_core/tests/test_item_selection.py +++ b/numpy/_core/tests/test_item_selection.py @@ -29,8 +29,8 @@ def test_simple(self): tresult = list(ta.T.copy()) for index_array in index_arrays: if index_array.size != 0: - tresult[0].shape = (2,) + index_array.shape - tresult[1].shape = (2,) + index_array.shape + tresult[0] = tresult[0].reshape((2,) + index_array.shape) + tresult[1] = tresult[1].reshape((2,) + index_array.shape) for mode in modes: for index in indices: real_index = real_indices[mode][index] @@ -81,6 +81,17 @@ def test_empty_partition(self): assert_array_equal(a, a_original) + @pytest.mark.parametrize("dtype", + [np.int8, np.int16, np.int32, np.int64, + np.float16, np.float32, np.float64, np.longdouble]) + def test_out_dtype(self, dtype): + # In reference to github issue #25588 + a = np.arange(3).astype(np.int32) + indices = np.arange(2) + out = np.zeros_like(indices, dtype=dtype) + np.take(a, indices, out=out) + assert_array_equal(a[indices], out) + def test_empty_argpartition(self): # In reference to github issue #6530 a = np.array([0, 2, 4, 6, 8, 10]) diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 984210e53af7..30ed3023cc92 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -5,7 +5,7 @@ import pytest -from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD +from numpy.testing import 
IS_EDITABLE, IS_WASM, NOGIL_BUILD # This import is copied from random.tests.test_extending try: @@ -91,7 +91,6 @@ def install_temp(tmpdir_factory): NOGIL_BUILD, reason="Py_GIL_DISABLED builds do not currently support the limited API", ) -@pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") def test_limited_api(install_temp): """Test building a third-party C extension with the limited API and building a cython extension with the limited API diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 313d3efe779a..720ea1aa91b8 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -343,9 +343,6 @@ async def async_test_context_locality(get_module): def test_context_locality(get_module): - if (sys.implementation.name == 'pypy' - and sys.pypy_version_info[:3] < (7, 3, 6)): - pytest.skip('no context-locality support in PyPy < 7.3.6') asyncio.run(async_test_context_locality(get_module)) @@ -411,9 +408,6 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.xfail(sys.implementation.name == "pypy", - reason=("bad interaction between getenv and " - "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) @pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 8e2aa0a507b1..8df78da067eb 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -21,13 +21,7 @@ subtract, sum, ) -from numpy.testing import ( - IS_PYPY, - assert_, - assert_array_equal, - assert_equal, - break_cycles, -) +from numpy.testing import assert_, assert_array_equal, assert_equal @pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") @@ -36,15 +30,11 @@ def setup_method(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') 
self.shape = (3, 4) self.dtype = 'float32' - self.data = arange(12, dtype=self.dtype) - self.data.resize(self.shape) + self.data = arange(12, dtype=self.dtype).reshape(self.shape) def teardown_method(self): self.tmpfp.close() self.data = None - if IS_PYPY: - break_cycles() - break_cycles() def test_roundtrip(self): # Write data to file @@ -205,7 +195,7 @@ class MemmapSubClass(memmap): fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. the - # ufunc and __getitem__ output is never turned into a ndarray + # ufunc and __getitem__ output is never turned into an ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) @@ -246,3 +236,51 @@ def test_shape_type(self): memmap(self.tmpfp, shape=self.shape, mode='w+') memmap(self.tmpfp, shape=list(self.shape), mode='w+') memmap(self.tmpfp, shape=asarray(self.shape), mode='w+') + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(3,)) + arr[:] = [1, 2, 3] + match arr: + case [a, b, c]: + assert a == 1 + assert b == 2 + assert c == 3 + case _: + raise AssertionError("1D memmap did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(2, 2)) + arr[:] = [[1, 2], [3, 4]] + match arr: + case [row1, row2]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + case _: + raise AssertionError("2D memmap did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(2, 2, 2)) + arr[:] = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, [[1, 2], [3, 
4]]) + assert_array_equal(plane2, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D memmap did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + assert_array_equal(row3, [5, 6]) + assert_array_equal(row4, [7, 8]) + case _: + raise AssertionError("3D memmap did not match sequence pattern") diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 218fcc79592e..dcb6ea192b3e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -13,11 +13,14 @@ import pathlib import pickle import re +import subprocess import sys import tempfile +import textwrap +import tracemalloc import warnings import weakref -from contextlib import contextmanager +from contextlib import ExitStack, contextmanager # Need to test an object that does not fully implement math interface from datetime import datetime, timedelta @@ -37,8 +40,6 @@ BLAS_SUPPORTS_FPE, HAS_REFCOUNT, IS_64BIT, - IS_PYPY, - IS_PYSTON, IS_WASM, assert_, assert_allclose, @@ -55,7 +56,11 @@ runstring, temppath, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) def assert_arg_sorted(arr, arg): @@ -177,7 +182,6 @@ def test_writeable_from_buffer(self): vals.setflags(write=True) assert_(vals.flags.writeable) - @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") def test_writeable_pickle(self): import pickle # Small arrays will be copied without setting base. 
@@ -298,17 +302,17 @@ def test_int(self): (np.int64, np.uint64, 64)]: for i in range(1, s): assert_equal(hash(st(-2**i)), hash(-2**i), - err_msg="%r: -2**%d" % (st, i)) + err_msg=f"{st!r}: -2**{i}") assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (st, i - 1)) + err_msg=f"{st!r}: 2**{i - 1}") assert_equal(hash(st(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (st, i)) + err_msg=f"{st!r}: 2**{i} - 1") i = max(i - 1, 1) assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (ut, i - 1)) + err_msg=f"{ut!r}: 2**{i - 1}") assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (ut, i)) + err_msg=f"{ut!r}: 2**{i} - 1") class TestAttributes: @@ -323,9 +327,13 @@ def test_attributes(self): assert_equal(one.shape, (10,)) assert_equal(two.shape, (4, 5)) assert_equal(three.shape, (2, 5, 6)) - three.shape = (10, 3, 2) + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings('ignore', category=DeprecationWarning) + three.shape = (10, 3, 2) assert_equal(three.shape, (10, 3, 2)) - three.shape = (2, 5, 6) + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings('ignore', category=DeprecationWarning) + three.shape = (2, 5, 6) assert_equal(one.strides, (one.itemsize,)) num = two.itemsize assert_equal(two.strides, (5 * num, num)) @@ -592,7 +600,6 @@ def test_array_as_keyword(self, func): func(a=3) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize("func", [np.array, np.asarray, @@ -1036,6 +1043,20 @@ def test_malloc_fails(self): with assert_raises(np._core._exceptions._ArrayMemoryError): np.empty(np.iinfo(np.intp).max, dtype=np.uint8) + @pytest.mark.thread_unsafe(reason="tracemalloc is not thread-safe") + def test_tracemalloc(self): + with ExitStack() as ctx: + if not tracemalloc.is_tracing(): + tracemalloc.start() + ctx.callback(tracemalloc.stop) + pre_snapshot = 
tracemalloc.take_snapshot()
+        arr = np.zeros(1000000, dtype="uint8")
+        post_snapshot = tracemalloc.take_snapshot()
+        diff = post_snapshot.compare_to(pre_snapshot, "filename")
+        allocated_bytes = sum(d.size_diff for d in diff)
+        # Allow for some non-data allocations
+        assert_allclose(allocated_bytes, arr.nbytes, atol=1000)
+
     def test_zeros(self):
         types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
         for dt in types:
@@ -1343,9 +1364,11 @@ def test_ndmax_greater_than_actual_dims(self):
 
     def test_ndmax_less_than_actual_dims(self):
         data = [[[1], [2]], [[3], [4]]]
-        with pytest.raises(ValueError,
-                match="setting an array element with a sequence. "
-                "The requested array would exceed the maximum number of dimension of 2."):
+        with pytest.raises(
+            ValueError,
+            match="setting an array element with a sequence. "
+            "The requested array would exceed the maximum number of dimension of 2.",
+        ):
             np.array(data, ndmax=2)
 
     def test_ndmax_is_zero(self):
@@ -1377,7 +1400,7 @@ def test_ndmax_is_negative(self):
         with pytest.raises(ValueError, match="ndmax must be in the range"):
             np.array(data, ndmax=-1)
 
-    def test_ndmax_greather_than_NPY_MAXDIMS(self):
+    def test_ndmax_greater_than_NPY_MAXDIMS(self):
         data = [1, 2, 3]
         # current NPY_MAXDIMS is 64
         with pytest.raises(ValueError, match="ndmax must be in the range"):
@@ -1785,7 +1808,7 @@ def test_structured_cast_promotion_fieldorder(self):
     @pytest.mark.parametrize("align", [True, False])
     def test_structured_promotion_packs(self, dtype_dict, align):
         # Structured dtypes are packed when promoted (we consider the packed
-        # form to be "canonical"), so tere is no extra padding.
+        # form to be "canonical"), so there is no extra padding.
dtype = np.dtype(dtype_dict, align=align) # Remove non "canonical" dtype options: dtype_dict.pop("itemsize", None) @@ -2810,6 +2833,8 @@ def test_searchsorted_type_specific(self): for dt in types: if dt == 'M': dt = 'M8[D]' + if dt == 'm': + dt = 'm8[s]' if dt == '?': a = np.arange(2, dtype=dt) out = np.arange(2) @@ -2910,6 +2935,8 @@ def test_searchsorted_with_sorter(self): for dt in types: if dt == 'M': dt = 'M8[D]' + if dt == 'm': + dt = 'm8[s]' if dt == '?': a = np.array([1, 0], dtype=dt) # We want the sorter array to be of a type that is different @@ -3199,9 +3226,9 @@ def test_partition(self): aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), - msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) + msg=f"{i}: {p[:, i]!r} <= {p[:, :i].T!r}") at((p[:, i + 1:].T > p[:, i]).all(), - msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + msg=f"{i}: {p[:, i]!r} < {p[:, i + 1:].T!r}") for row in range(p.shape[0]): self.assert_partitioned(p[row], [i]) self.assert_partitioned(parg[row], [i]) @@ -3212,9 +3239,9 @@ def test_partition(self): aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i, :] <= p[i, :]).all(), - msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + msg=f"{i}: {p[i, :]!r} <= {p[:i, :]!r}") at((p[i + 1:, :] > p[i, :]).all(), - msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + msg=f"{i}: {p[i, :]!r} < {p[:, i + 1:]!r}") for col in range(p.shape[1]): self.assert_partitioned(p[:, col], [i]) self.assert_partitioned(parg[:, col], [i]) @@ -3234,9 +3261,9 @@ def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_compare(operator.__le__, d[prev:k], d[k], - err_msg='kth %d' % k) + err_msg=f'kth {k}') assert_((d[k:] >= d[k]).all(), - msg="kth %d, %r not greater equal %r" % (k, d[k:], d[k])) + msg=f"kth {k}, {d[k:]!r} not greater equal {d[k]!r}") prev = k + 1 def test_partition_iterative(self): @@ -4014,7 +4041,6 @@ def 
test_inplace(self): # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc - @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862") def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) @@ -4089,8 +4115,8 @@ def make_obj(base, array_priority=False, array_ufunc=False, def check(obj, binop_override_expected, ufunc_override_expected, inplace_override_expected, check_scalar=True): for op, (ufunc, has_inplace, dtype) in ops.items(): - err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' - % (op, ufunc, has_inplace, dtype)) + err_msg = (f'op: {op}, ufunc: {ufunc}, ' + f'has_inplace: {has_inplace}, dtype: {dtype}') check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] if check_scalar: check_objs.append(check_objs[0][0]) @@ -4434,7 +4460,7 @@ def test_temporary_with_cast(self): for dt in (np.complex64, np.complex128, np.clongdouble): c = np.ones(100000, dtype=dt) r = abs(c * 2.0) - assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + assert_equal(r.dtype, np.dtype(f'f{c.itemsize // 2}')) def test_elide_broadcast(self): # test no elision on broadcast to higher dimension @@ -4498,8 +4524,6 @@ def test_intp_sequence_converters(self, converter): @pytest.mark.parametrize("converter", [_multiarray_tests.run_scalar_intp_converter, _multiarray_tests.run_scalar_intp_from_sequence]) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_intp_sequence_converters_errors(self, converter): with pytest.raises(TypeError, match="expected a sequence of integers or a single integer, "): @@ -4556,9 +4580,13 @@ def test_test_zero_rank(self): class TestPickling: - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, - reason=('this tests the error messages when trying to' - 'protocol 5 although it is not available')) + 
@pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL >= 5, + reason=( + "this tests the error messages when trying toprotocol 5 " + "although it is not available" + ), + ) def test_correct_protocol5_error_message(self): array = np.arange(10) @@ -4583,8 +4611,10 @@ def test_record_array_with_object_dtype(self): assert_equal(arr_without_object.dtype, depickled_arr_without_object.dtype) - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) def test_f_contiguous_array(self): f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') buffers = [] @@ -4601,11 +4631,23 @@ def test_f_contiguous_array(self): assert_equal(f_contiguous_array, depickled_f_contiguous_array) - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") - @pytest.mark.parametrize('transposed_contiguous_array', - [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), - np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + - [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) + @pytest.mark.parametrize( + "transposed_contiguous_array", + [ + np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2)), + ] + + [ + np.random.default_rng(42) + .random(np.arange(2, 7)) + .transpose(np.random.permutation(5)) + for _ in range(3) + ], + ) def test_transposed_contiguous_array(self, transposed_contiguous_array): buffers = [] # When using pickle protocol 5, arrays which can be transposed to c_contiguous @@ -4620,7 +4662,10 @@ def test_transposed_contiguous_array(self, transposed_contiguous_array): assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) - 
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) def test_load_legacy_pkl_protocol5(self): # legacy byte strs are dumped in 2.2.1 c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' # noqa: E501 @@ -4645,9 +4690,14 @@ def test_non_contiguous_array(self): # using any protocol buffers = [] for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + buffer_callback = buffers.append if proto >= 5 else None depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto, - buffer_callback=buffers.append if proto >= 5 else None)) + pickle.dumps( + non_contiguous_array, + protocol=proto, + buffer_callback=buffer_callback, + ) + ) assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) @@ -4679,59 +4729,91 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_int8(self): - s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
+ s = ( + b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" + b"ndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\n" + b"dtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff" + b"\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." + ) a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_float32(self): - s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= (3, 14) + assert "ValueError" in e.stdout + assert "It is possible that this is a false positive." in e.stdout + else: + if sys.version_info >= (3, 14): + raise AssertionError("Unexpected success of resize refcheck") + def test_check_reference_2(self): # see gh-30265 x = np.zeros((2, 2)) @@ -6297,10 +6392,7 @@ def test_check_reference_2(self): @_no_tracing def test_int_shape(self): x = np.eye(3) - if IS_PYPY: - x.resize(3, refcheck=False) - else: - x.resize(3) + x.resize(3) assert_array_equal(x, np.eye(3)[0, :]) def test_none_shape(self): @@ -6331,19 +6423,13 @@ def test_invalid_arguments(self): @_no_tracing def test_freeform_shape(self): x = np.eye(3) - if IS_PYPY: - x.resize(3, 2, 1, refcheck=False) - else: - x.resize(3, 2, 1) + x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) @_no_tracing def test_zeros_appended(self): x = np.eye(3) - if IS_PYPY: - x.resize(2, 3, 3, refcheck=False) - else: - x.resize(2, 3, 3) + x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) @@ -6351,10 +6437,7 @@ def test_zeros_appended(self): def test_obj_obj(self): # check memory is initialized on resize, gh-4857 a = np.ones(10, dtype=[('k', object, 2)]) - if IS_PYPY: - a.resize(15, refcheck=False) - else: - a.resize(15,) + a.resize(15,) assert_equal(a.shape, (15,)) assert_array_equal(a['k'][-5:], 0) 
assert_array_equal(a['k'][:-5], 1) @@ -6522,6 +6605,22 @@ def test_basic(self): assert_array_equal(y, z) assert_array_equal(y, [67305985, 134678021]) + def test_view_dtype_change_subclass_finalize(self): + # gh-31192: view() with dtype change on a subclass must call + # __array_finalize__ and return the correct subclass type. + + class MyArray(np.ndarray): + def __array_finalize__(self, obj): + self.finalized_from = obj + self._dtype_at_finalize = self.dtype + + arr = np.arange(6).view(MyArray) + result = arr.view("i1") + assert isinstance(result, MyArray) + assert result.dtype == np.dtype("i1") + assert result._dtype_at_finalize == np.dtype("i1") + assert result.finalized_from is arr + def _mean(a, **args): return a.mean(**args) @@ -7196,6 +7295,13 @@ def test_dot_array_order(self): assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) + def test_dot_has_native_byteorder(self): + # gh-30931 + a = np.array([1, 2, 3], ">f8") + dot = a.dot([[], [], []]) + + assert_equal(dot.dtype, np.dtype("=f8")) + def test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): @@ -8224,22 +8330,22 @@ def test_complex_warning(self): class TestMinScalarType: - def test_usigned_shortshort(self): + def test_unsigned_shortshort(self): dt = np.min_scalar_type(2**8 - 1) wanted = np.dtype('uint8') assert_equal(wanted, dt) - def test_usigned_short(self): + def test_unsigned_short(self): dt = np.min_scalar_type(2**16 - 1) wanted = np.dtype('uint16') assert_equal(wanted, dt) - def test_usigned_int(self): + def test_unsigned_int(self): dt = np.min_scalar_type(2**32 - 1) wanted = np.dtype('uint32') assert_equal(wanted, dt) - def test_usigned_longlong(self): + def test_unsigned_longlong(self): dt = np.min_scalar_type(2**63 - 1) wanted = np.dtype('uint64') assert_equal(wanted, dt) @@ -8266,7 +8372,7 @@ def test_native_padding(self): if j == 0: s = 'bi' else: - s = 'b%dxi' % j + s = f'b{j}xi' self._check('@' + s, {'f0': ('i1', 0), 'f1': ('i', align * (1 
+ j // align))}) self._check('=' + s, {'f0': ('i1', 0), @@ -8567,9 +8673,9 @@ def test_export_record(self): sz = sum(np.dtype(b).itemsize for a, b in dt) if np.dtype('l').itemsize == 4: - assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # noqa: E501 else: - assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # noqa: E501 assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) @@ -8628,7 +8734,9 @@ def test_padding(self): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) - @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object", + ) def test_reference_leak(self): if HAS_REFCOUNT: count_1 = sys.getrefcount(np._core._internal) @@ -8750,7 +8858,9 @@ class foo(ctypes.Structure): assert_equal(arr['a'], 3) @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]]) - @pytest.mark.thread_unsafe(reason="_multiarray_tests used memoryview, which is thread-unsafe") + @pytest.mark.thread_unsafe( + reason="_multiarray_tests used memoryview, which is thread-unsafe", + ) def test_error_if_stored_buffer_info_is_corrupted(self, obj): """ If a user extends a NumPy array before 1.20 and then runs it @@ -9057,8 +9167,7 @@ def test_order_mismatch(self, arr, order1, order2): for copy in self.if_needed_vals + self.false_vals: res = np.array(view, copy=copy, order=order2) # res.base.obj refers to the memoryview - if not IS_PYPY: - assert res is arr or res.base.obj is arr + 
assert res is arr or res.base.obj is arr else: for copy in self.if_needed_vals: res = np.array(arr, copy=copy, order=order2) @@ -9147,7 +9256,9 @@ def __array_interface__(self): # This fails due to going into the buffer protocol path (f, {'data': None, 'shape': ()}, TypeError), ]) - @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object", + ) def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface self.f.iface = {'typestr': 'f8'} @@ -9349,6 +9460,7 @@ def test_to_bool_scalar(self): assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) + @requires_deep_recursion def test_to_bool_scalar_not_convertible(self): class NotConvertible: @@ -9357,11 +9469,6 @@ def __bool__(self): assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) - if IS_PYSTON: - pytest.skip("Pyston disables recursion checking") - if IS_WASM: - pytest.skip("Pyodide/WASM has limited stack size") - self_containing = np.array([None]) self_containing[0] = self_containing @@ -9554,6 +9661,13 @@ def test_error(self): assert_raises(ValueError, np.where, c, a, a) assert_raises(ValueError, np.where, c[0], a, b) + def test_scalar_overflow(self): + c = [True] + a = np.array([1], dtype=np.uint8) + b = 1000 + assert_raises(OverflowError, np.where, c, a, b) + assert_raises(OverflowError, np.where, c, b, a) + def test_string(self): # gh-4778 check strings are properly filled with nulls a = np.array("abc") @@ -9592,59 +9706,57 @@ def test_kwargs(self): np.where(a, x=a, y=a) -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf: - - def test_empty_array(self): - x = np.array([]) - assert_(sys.getsizeof(x) > 0) +class TestSizeOf: - def check_array(self, dtype): - elem_size = 
dtype(0).itemsize + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) - for length in [10, 50, 100, 500]: - x = np.arange(length, dtype=dtype) - assert_(sys.getsizeof(x) > length * elem_size) + def check_array(self, dtype): + elem_size = dtype(0).itemsize - def test_array_int32(self): - self.check_array(np.int32) + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) - def test_array_int64(self): - self.check_array(np.int64) + def test_array_int32(self): + self.check_array(np.int32) - def test_array_float32(self): - self.check_array(np.float32) + def test_array_int64(self): + self.check_array(np.int64) - def test_array_float64(self): - self.check_array(np.float64) + def test_array_float32(self): + self.check_array(np.float32) - def test_view(self): - d = np.ones(100) - assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + def test_array_float64(self): + self.check_array(np.float64) - def test_reshape(self): - d = np.ones(100) - assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) - @_no_tracing - def test_resize(self): - d = np.ones(100) - old = sys.getsizeof(d) - d.resize(50) - assert_(old > sys.getsizeof(d)) - d.resize(150) - assert_(old < sys.getsizeof(d)) + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) - @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) - def test_resize_structured(self, dtype): - a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) - a.resize(1000) - assert_array_equal(a, np.zeros(1000, dtype=dtype)) + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def 
test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) - def test_error(self): - d = np.ones(100) - assert_raises(TypeError, d.__sizeof__, "a") + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") class TestHashing: @@ -9825,9 +9937,6 @@ def test_1d_format(self): assert_raises(TypeError, '{:30}'.format, a) -from numpy.testing import IS_PYPY - - class TestCTypes: def test_ctypes_is_available(self): @@ -9895,12 +10004,10 @@ def test_ctypes_data_as_holds_reference(self, arr): # but when the `ctypes_ptr` object dies, so should `arr` del ctypes_ptr - if IS_PYPY: - # Pypy does not recycle arr objects immediately. Trigger gc to - # release arr. Cpython uses refcounts. An explicit call to gc - # should not be needed here. - break_cycles() - assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + assert_( + arr_ref() is None, + "unknowable whether ctypes pointer holds a reference", + ) @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_ctypes_as_parameter_holds_reference(self): @@ -9917,9 +10024,10 @@ def test_ctypes_as_parameter_holds_reference(self): # but when the `ctypes_ptr` object dies, so should `arr` del ctypes_ptr - if IS_PYPY: - break_cycles() - assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + assert_( + arr_ref() is None, + "unknowable whether ctypes pointer holds a reference", + ) class TestWritebackIfCopy: @@ -10532,7 +10640,11 @@ def test_strided_loop_alignments(self): # test casting, both to and from misaligned with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) + warnings.filterwarnings( + "ignore", + "Casting complex values", + ComplexWarning, + ) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 @@ -10746,7 +10858,9 @@ def test_argsort_largearrays(dtype): 
assert_arg_sorted(arr, np.argsort(arr, kind='quick')) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") +@pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object", +) def test_gh_22683(): b = 777.68760986 a = np.array([b] * 10000, dtype=object) @@ -10892,7 +11006,9 @@ def test_to_device(self): arr.to_device("cpu", stream=1) def test_array_interface_excess_dimensions_raises(): - """Regression test for gh-27949: ensure too many dims raises ValueError instead of segfault.""" + """Regression test for gh-27949. + Ensure too many dims raises ValueError instead of segfault. + """ # Dummy object to hold a custom __array_interface__ class DummyArray: @@ -10924,7 +11040,6 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") class TestTextSignatures: @pytest.mark.parametrize( "methodname", @@ -10993,6 +11108,7 @@ def test_c_func_dispatcher_signature(self, func): (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), (np.fromiter, ("iter", "dtype", "count", "like")), (np.frompyfunc, ("func", "nin", "nout", "kwargs")), + (np.fromstring, ("string", "dtype", "count", "sep", "like")), (np.nested_iters, ( "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", "buffersize", @@ -11006,3 +11122,45 @@ def test_add_newdoc_function_signature(self, func, parameter_names): sig = inspect.signature(func) assert sig.parameters assert tuple(sig.parameters) == parameter_names + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + arr = np.array([1, 2, 3]) + match arr: + case [a, b, c]: + assert a == 1 + assert b == 2 + assert c == 3 + case _: + raise AssertionError("1D 
ndarray did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + arr = np.array([[1, 2], [3, 4]]) + match arr: + case [row1, row2]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + case _: + raise AssertionError("2D ndarray did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, [[1, 2], [3, 4]]) + assert_array_equal(plane2, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D ndarray did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + assert_array_equal(row3, [5, 6]) + assert_array_equal(row4, [7, 8]) + case _: + raise AssertionError("3D ndarray did not match sequence pattern") diff --git a/numpy/_core/tests/test_multiprocessing.py b/numpy/_core/tests/test_multiprocessing.py new file mode 100644 index 000000000000..2c5c2fcfb8ed --- /dev/null +++ b/numpy/_core/tests/test_multiprocessing.py @@ -0,0 +1,51 @@ +import pytest + +import numpy as np +from numpy.testing import IS_WASM + +pytestmark = pytest.mark.thread_unsafe( + reason="tests in this module are explicitly multi-processed" +) + +def bool_array_writer(shm_name, n): + # writer routine for test_read_write_bool_array + import time + from multiprocessing import shared_memory + shm = shared_memory.SharedMemory(name=shm_name) + arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf) + for i in range(n): + arr[i] = True + time.sleep(0.00001) + +def bool_array_reader(shm_name, n): + # reader routine for test_read_write_bool_array + from multiprocessing import shared_memory + shm = shared_memory.SharedMemory(name=shm_name) + arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf) + for i in range(n): + while not arr[i]: + pass + +@pytest.mark.skipif(IS_WASM, + reason="WASM does not support 
_posixshmem") +def test_read_write_bool_array(): + # See: gh-30389 + # + # Prior to Python 3.13, boolean scalar singletons (np.True / np.False) were + # regular reference-counted objects. Due to the double evaluation in + # PyArrayScalar_RETURN_BOOL_FROM_LONG, concurrent reads and writes of a + # boolean array could corrupt their refcounts, potentially causing a crash + # (e.g., `free(): invalid pointer`). + # + # This test creates a multi-process race between a writer and a reader to + # ensure that NumPy does not exhibit such failures. + from concurrent.futures import ProcessPoolExecutor + from multiprocessing import shared_memory + n = 10000 + shm = shared_memory.SharedMemory(create=True, size=n) + with ProcessPoolExecutor(max_workers=2) as executor: + f_writer = executor.submit(bool_array_writer, shm.name, n) + f_reader = executor.submit(bool_array_reader, shm.name, n) + shm.unlink() + f_writer.result() + f_reader.result() diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 9b10974839a4..d0f2ebce24e8 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -16,7 +16,7 @@ reason="tests in this module are already explicitly multi-threaded" ) -def test_parallel_randomstate_creation(): +def test_parallel_randomstate(): # if the coercion cache is enabled and not thread-safe, creating # RandomState instances simultaneously leads to a data race def func(seed): @@ -24,6 +24,17 @@ def func(seed): run_threaded(func, 500, pass_count=True) + # seeding and setting state shouldn't race with generating RNG samples + rng = np.random.RandomState() + + def func(seed): + base_rng = np.random.RandomState(seed) + state = base_rng.get_state() + rng.seed(seed) + rng.random() + rng.set_state(state) + + run_threaded(func, 8, pass_count=True) def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe @@ -297,27 +308,33 @@ def func(index): # These are 
all implemented using PySequence_Fast, which needs locking to be safe def np_broadcast(arrs): - for i in range(100): + for i in range(50): np.broadcast(arrs) def create_array(arrs): - for i in range(100): + for i in range(50): np.array(arrs) def create_nditer(arrs): - for i in range(1000): + for i in range(50): np.nditer(arrs) -@pytest.mark.parametrize("kernel", (np_broadcast, create_array, create_nditer)) -def test_arg_locking(kernel): - # should complete without failing or generating an error about an array size - # changing - b = threading.Barrier(5) +@pytest.mark.parametrize( + "kernel, outcome", + ( + (np_broadcast, "error"), + (create_array, "error"), + (create_nditer, "success"), + ), +) +def test_arg_locking(kernel, outcome): + # should complete without triggering races but may error + done = 0 - arrs = [] + arrs = [np.array([1, 2, 3]) for _ in range(1000)] - def read_arrs(): + def read_arrs(b): nonlocal done b.wait() try: @@ -325,7 +342,7 @@ def read_arrs(): finally: done += 1 - def mutate_list(): + def contract_and_expand_list(b): b.wait() while done < 4: if len(arrs) > 10: @@ -333,10 +350,54 @@ def mutate_list(): elif len(arrs) <= 10: arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) - arrs = [np.array([1, 2, 3]) for _ in range(1000)] + def replace_list_items(b): + b.wait() + rng = np.random.RandomState() + rng.seed(0x4d3d3d3) + while done < 4: + data = rng.randint(0, 1000, size=4) + arrs[data[0]] = data[1:] - tasks = [threading.Thread(target=read_arrs) for _ in range(4)] - tasks.append(threading.Thread(target=mutate_list)) + for mutation_func in (replace_list_items, contract_and_expand_list): + b = threading.Barrier(5) + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as tpe: + tasks = [tpe.submit(read_arrs, b) for _ in range(4)] + tasks.append(tpe.submit(mutation_func, b)) + for t in tasks: + t.result() + except RuntimeError as e: + if outcome == "success": + raise + assert "Inconsistent object during array creation?" 
in str(e) + msg = "replace_list_items should not raise errors" + assert mutation_func is contract_and_expand_list, msg + finally: + if len(tasks) < 5: + b.abort() + +def test_array__buffer__thread_safety(): + import inspect + arr = np.arange(1000) + flags = [inspect.BufferFlags.STRIDED, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + arr.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) + +def test_void_dtype__buffer__thread_safety(): + import inspect + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert isinstance(x, np.void) + flags = [inspect.BufferFlags.STRIDES, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + x.__buffer__(flags[i % 2]) - [t.start() for t in tasks] - [t.join() for t in tasks] + run_threaded(func, max_workers=8, pass_barrier=True) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 943c25cdaa13..fa158728285b 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -13,7 +13,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, - IS_PYPY, IS_WASM, assert_, assert_array_equal, @@ -262,43 +261,164 @@ def test_iter_best_order_multi_index_3d(): a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), - (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 0), + (0, 0, 1), + (0, 1, 0), + (0, 1, 1), + (0, 2, 0), + (0, 2, 1), + (1, 0, 0), + (1, 0, 1), + (1, 1, 0), + (1, 1, 1), + (1, 2, 0), + (1, 2, 1), + ], + ) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 
0), - (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 0), + (1, 0, 0), + (0, 1, 0), + (1, 1, 0), + (0, 2, 0), + (1, 2, 0), + (0, 0, 1), + (1, 0, 1), + (0, 1, 1), + (1, 1, 1), + (0, 2, 1), + (1, 2, 1), + ], + ) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), - (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (1, 0, 0), + (1, 0, 1), + (1, 1, 0), + (1, 1, 1), + (1, 2, 0), + (1, 2, 1), + (0, 0, 0), + (0, 0, 1), + (0, 1, 0), + (0, 1, 1), + (0, 2, 0), + (0, 2, 1), + ], + ) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), - (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 2, 0), + (0, 2, 1), + (0, 1, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 1), + (1, 2, 0), + (1, 2, 1), + (1, 1, 0), + (1, 1, 1), + (1, 0, 0), + (1, 0, 1), + ], + ) i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), - (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 1), + (0, 0, 0), + (0, 1, 1), + (0, 1, 0), + (0, 2, 1), + (0, 2, 0), + (1, 0, 1), + (1, 0, 0), + (1, 1, 1), + (1, 1, 0), + (1, 2, 1), + (1, 2, 0), + ], + ) # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), - (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 
2).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), - (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), - (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + i = nditer( + a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], + [['readonly']], + ) + assert_equal( + iter_multi_index(i), + [ + (1, 0, 0), + (0, 0, 0), + (1, 1, 0), + (0, 1, 0), + (1, 2, 0), + (0, 2, 0), + (1, 0, 1), + (0, 0, 1), + (1, 1, 1), + (0, 1, 1), + (1, 2, 1), + (0, 2, 1), + ], + ) + i = nditer( + a.reshape(2, 3, 2).copy(order="F")[:, ::-1], + ["multi_index"], + [["readonly"]], + ) + assert_equal( + iter_multi_index(i), + [ + (0, 2, 0), + (1, 2, 0), + (0, 1, 0), + (1, 1, 0), + (0, 0, 0), + (1, 0, 0), + (0, 2, 1), + (1, 2, 1), + (0, 1, 1), + (1, 1, 1), + (0, 0, 1), + (1, 0, 1), + ], + ) + i = nditer( + a.reshape(2, 3, 2).copy(order="F")[:, :, ::-1], + ["multi_index"], + [["readonly"]], + ) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 1), + (1, 0, 1), + (0, 1, 1), + (1, 1, 1), + (0, 2, 1), + (1, 2, 1), + (0, 0, 0), + (1, 0, 0), + (0, 1, 0), + (1, 1, 0), + (0, 2, 0), + (1, 2, 0), + ], + ) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering @@ -700,8 +820,8 @@ def test_iter_broadcasting_errors(): assert_(msg.find('(2,3)->(2,3)') >= 0, f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' - '(2,)->(2,newaxis)') % msg) + f'Message "{msg}" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, f'Message 
"{msg}" doesn\'t contain itershape parameter (4,3)') @@ -1223,8 +1343,14 @@ def test_iter_copy_if_overlap(): x = arange(10) a = x b = x - i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], - ['readwrite', 'overlap_assume_elementwise']]) + i = nditer( + [a, b], + ["copy_if_overlap"], + [ + ["readonly", "overlap_assume_elementwise"], + ["readwrite", "overlap_assume_elementwise"], + ], + ) with i: assert_(i.operands[0] is a and i.operands[1] is b) with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: @@ -1403,7 +1529,10 @@ def test_iter_copy(): @pytest.mark.parametrize("dtype", np.typecodes["All"]) @pytest.mark.parametrize("loop_dtype", np.typecodes["All"]) -@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") +@pytest.mark.filterwarnings( + "ignore::numpy.exceptions.ComplexWarning", + "ignore::DeprecationWarning", +) def test_iter_copy_casts(dtype, loop_dtype): # Ensure the dtype is never flexible: if loop_dtype.lower() == "m": @@ -2079,7 +2208,6 @@ def test_buffered_cast_error_paths(): buf[...] = "a" # cannot be converted to int. @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") def test_buffered_cast_error_paths_unraisable(): # The following gives an unraisable error. Pytest sometimes captures that # (depending python and/or pytest version). 
So with Python>=3.8 this can @@ -2646,7 +2774,10 @@ def test_0d(self): i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [list(j) for _ in i] - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + assert_equal( + vals, + [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]], + ) i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] @@ -3206,6 +3337,13 @@ def test_iter_too_large_with_multiindex(): with assert_raises(ValueError): _multiarray_tests.test_nditer_too_large(arrays, i * 2 + 1, mode) + +def test_invalid_call_of_enable_external_loop(): + with pytest.raises(ValueError, + match='Iterator flag EXTERNAL_LOOP cannot be used'): + np.nditer(([[1], [2]], [3, 4]), ['multi_index']).enable_external_loop() + + def test_writebacks(): a = np.arange(6, dtype='f4') au = a.byteswap() @@ -3503,7 +3641,6 @@ def test_debug_print(capfd): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_signature_constructor(): sig = inspect.signature(np.nditer) @@ -3514,7 +3651,6 @@ def test_signature_constructor(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "method", [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 8d9d9e63ce38..72f854c7b001 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -112,7 +112,7 @@ def test_weak_promotion_scalar_path(op): # Integer path: res = op(np.uint8(3), 5) assert res == op(3, 5) - assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 + assert res.dtype == np.uint8 or res.dtype == bool with pytest.raises(OverflowError): op(np.uint8(3), 1000) @@ -120,14 +120,14 @@ def 
test_weak_promotion_scalar_path(op): # Float path: res = op(np.float32(3), 5.) assert res == op(3., 5.) - assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 + assert res.dtype == np.float32 or res.dtype == bool def test_nep50_complex_promotion(): with pytest.warns(RuntimeWarning, match=".*overflow"): res = np.complex64(3) + complex(2**300) - assert type(res) == np.complex64 + assert type(res) is np.complex64 def test_nep50_integer_conversion_errors(): diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 9e71b7c6b1b8..5909c9eb8564 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -19,7 +19,6 @@ from numpy.random import rand, randint, randn from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, IS_WASM, assert_, assert_almost_equal, @@ -1212,10 +1211,10 @@ def test_promote_types_endian(self): assert_equal(np.promote_types('U8', '>U5'), np.dtype('U8')) - assert_equal(np.promote_types('M8', '>M8'), np.dtype('M8')) - assert_equal(np.promote_types('m8', '>m8'), np.dtype('m8')) + assert_equal(np.promote_types('M8[D]', '>M8[D]'), np.dtype('M8[D]')) + assert_equal(np.promote_types('m8[s]', '>m8[s]'), np.dtype('m8[s]')) def test_can_cast_and_promote_usertypes(self): # The rational type defines safe casting for signed integers, @@ -1773,14 +1772,8 @@ def assert_equal_w_dt(a, b, err_msg): err_msg = msg % (np.dtype(dt).name,) if dt != 'V': - if dt != 'M': - m = np.zeros((3, 3), dtype=dt) - n = np.ones(1, dtype=dt) - - m[0, 0] = n[0] - m[1, 0] = n[0] - - else: # np.zeros doesn't work for np.datetime64 + if dt == 'M': + # np.zeros doesn't work for np.datetime64 m = np.array(['1970-01-01'] * 9) m = m.reshape((3, 3)) @@ -1788,6 +1781,24 @@ def assert_equal_w_dt(a, b, err_msg): m[1, 0] = '1970-01-12' m = m.astype(dt) + elif dt == 'm': + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, 
dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + expected = np.array([2, 0, 0], dtype=np.intp) assert_equal_w_dt(np.count_nonzero(m, axis=0), expected, err_msg=err_msg) @@ -2161,8 +2172,8 @@ def _test_array_equal_parametrizations(): yield (b4, b4.copy(), False, False) yield (b4, b4.copy(), True, True) - t1 = b1.astype("timedelta64") - t2 = b2.astype("timedelta64") + t1 = b1.astype("timedelta64[D]") + t2 = b2.astype("timedelta64[D]") # Timedeltas are particular yield (t1, t1, None, False) @@ -2816,10 +2827,10 @@ def test_clip_value_min_max_flip(self, amin, amax): np.full(10, -2**64 + 1, dtype=object)), # for bugs in NPY_TIMEDELTA_MAX, based on a case # produced by hypothesis - (np.zeros(10, dtype='m8') - 1, - 0, - 0, - np.zeros(10, dtype='m8')), + (np.zeros(10, dtype='m8[s]') - np.timedelta64(1, 's'), + np.timedelta64(0, 's'), + np.timedelta64(0, 's'), + np.zeros(10, dtype='m8[s]')), ]) def test_clip_problem_cases(self, arr, amin, amax, exp): actual = np.clip(arr, amin, amax) @@ -2839,8 +2850,8 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): assert_equal(actual, expected) @pytest.mark.parametrize("arr, amin, amax", [ - (np.array([1] * 10, dtype='m8'), - np.timedelta64('NaT'), + (np.array([1] * 10, dtype='m8[s]'), + np.timedelta64('NaT', 's'), np.zeros(10, dtype=np.int32)), ]) def test_NaT_propagation(self, arr, amin, amax): @@ -3208,9 +3219,7 @@ def test_timedelta(self): # Allclose currently works for timedelta64 as long as `atol` is # an integer or also a timedelta64 a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") - assert np.isclose(a, a, atol=0, equal_nan=True).all() assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() - assert np.allclose(a, a, atol=0, equal_nan=True) assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) def test_tol_warnings(self): @@ -3385,7 +3394,6 @@ def 
test_for_reference_leak(self): assert_(sys.getrefcount(dim) == beg) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) def test_signatures(self, func): sig = inspect.signature(func) @@ -3581,7 +3589,7 @@ def test_dtype_str_bytes(self, likefunc, dtype): b = a[:, ::2] # Ensure b is not contiguous. kwargs = {'fill_value': ''} if likefunc == np.full_like else {} result = likefunc(b, dtype=dtype, **kwargs) - if dtype == str: + if dtype is str: assert result.strides == (16, 4) else: # dtype is bytes @@ -3941,29 +3949,16 @@ def test_array_likes(self): class TestCross: - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_2x2(self): u = [1, 2] v = [3, 4] - z = -2 - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) + assert_raises(ValueError, np.cross, u, v) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_2x3(self): u = [1, 2] v = [3, 4, 5] - z = np.array([10, -5, -2]) - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) + assert_raises(ValueError, np.cross, u, v) + assert_raises(ValueError, np.cross, v, u) def test_3x3(self): u = [1, 2, 3] @@ -3974,32 +3969,7 @@ def test_3x3(self): cp = np.cross(v, u) assert_equal(cp, -z) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_broadcasting(self): - # Ticket #2624 (Trac #2032) - u = np.tile([1, 2], (11, 1)) - v = np.tile([3, 4], (11, 1)) - z = -2 - assert_equal(np.cross(u, v), z) - assert_equal(np.cross(v, u), -z) - assert_equal(np.cross(u, u), 0) - - u = np.tile([1, 2], (11, 1)).T - v = np.tile([3, 4, 5], (11, 1)) - z = np.tile([10, -5, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0), z) - assert_equal(np.cross(v, u.T), -z) - 
assert_equal(np.cross(v, v), 0) - - u = np.tile([1, 2, 3], (11, 1)).T - v = np.tile([3, 4], (11, 1)).T - z = np.tile([-12, 9, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0, axisb=0), z) - assert_equal(np.cross(v.T, u.T), -z) - assert_equal(np.cross(u.T, u.T), 0) - u = np.tile([1, 2, 3], (5, 1)) v = np.tile([4, 5, 6], (5, 1)).T z = np.tile([-3, 6, -3], (5, 1)) @@ -4007,27 +3977,20 @@ def test_broadcasting(self): assert_equal(np.cross(v.T, u), -z) assert_equal(np.cross(u, u), 0) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_broadcasting_shapes(self): u = np.ones((2, 1, 3)) v = np.ones((5, 3)) assert_equal(np.cross(u, v).shape, (2, 5, 3)) u = np.ones((10, 3, 5)) - v = np.ones((2, 5)) + v = np.ones((3, 5)) assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2) assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0) u = np.ones((10, 3, 5, 7)) - v = np.ones((5, 7, 2)) + v = np.ones((5, 7, 3)) assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2) assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4) - # gh-5885 - u = np.ones((3, 4, 2)) - for axisc in range(-2, 2): - assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) def test_uint8_int32_mixed_dtypes(self): # regression test for gh-19138 @@ -4227,7 +4190,6 @@ def test_shape_mismatch_error_message(self): np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_signatures(self): sig_new = inspect.signature(np.broadcast) assert len(sig_new.parameters) == 1 @@ -4254,6 +4216,12 @@ def test_raise(self): class TestTensordot: + def test_rejects_duplicate_axes(self): + a = np.ones((2, 3, 3)) + b = np.ones((3, 3, 4)) + with pytest.raises(ValueError): + np.tensordot(a, b, axes=([1, 
1], [0, 0])) + def test_zero_dimension(self): # Test resolution to issue #5663 a = np.ndarray((3, 0)) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 5763a964c41d..193b253d4f92 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -5,14 +5,8 @@ import numpy as np import numpy._core.numerictypes as nt -from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes -from numpy.testing import ( - IS_PYPY, - assert_, - assert_equal, - assert_raises, - assert_raises_regex, -) +from numpy._core.numerictypes import issctype, sctype2char, sctypes +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex # This is the structure of the table used for plain objects: # @@ -66,9 +60,9 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 + # depth1: x Info color info y z + # depth2: value y2 Info2 name z2 Name Value + # depth3: name value y3 z3 ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), @@ -263,7 +257,7 @@ def test_access_top_fields(self): assert_equal(h['z'], np.array([self._buffer[0][5], self._buffer[1][5]], dtype='u1')) - def test_nested1_acessors(self): + def test_nested1_accessors(self): """Check reading the nested fields of a nested array (1st level)""" h = np.array(self._buffer, dtype=self._descr) if not self.multiple_rows: @@ -293,7 +287,7 @@ def test_nested1_acessors(self): self._buffer[1][3][1]], dtype='c16')) - def test_nested2_acessors(self): + def test_nested2_accessors(self): """Check reading the nested fields of a nested array (2nd level)""" h = np.array(self._buffer, dtype=self._descr) if not self.multiple_rows: @@ -341,7 +335,7 @@ class TestReadValuesNestedMultiple(ReadValuesNested): class TestEmptyField: def 
test_assign(self): a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"), ("float", "<2f4")] + a = a.view(dtype=[("int", "<0i4"), ("float", "<2f4")]) assert_(a['int'].shape == (5, 0)) assert_(a['float'].shape == (5, 2)) @@ -496,38 +490,6 @@ def test_ulong(self): assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize -@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning") -class TestMaximumSctype: - - # note that parametrizing with sctype['int'] and similar would skip types - # with the same size (gh-11923) - - @pytest.mark.parametrize( - 't', [np.byte, np.short, np.intc, np.long, np.longlong] - ) - def test_int(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1]) - - @pytest.mark.parametrize( - 't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong] - ) - def test_uint(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1]) - - @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) - def test_float(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1]) - - @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) - def test_complex(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1]) - - @pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_, - np.void]) - def test_other(self, t): - assert_equal(maximum_sctype(t), t) - - class Test_sctype2char: # This function is old enough that we're really just documenting the quirks # at this point. 
@@ -575,10 +537,10 @@ def test_issctype(rep, expected): assert_equal(actual, expected) -@pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") -@pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") +@pytest.mark.skipif( + sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1", +) class TestDocStrings: def test_platform_dependent_aliases(self): if np.int64 is np.int_: @@ -611,7 +573,7 @@ def test_names_reflect_attributes(self, t): assert getattr(np, t.__name__) is t @pytest.mark.parametrize('t', numeric_types) - def test_names_are_undersood_by_dtype(self, t): + def test_names_are_understood_by_dtype(self, t): """ Test the dtype constructor maps names back to the type """ assert np.dtype(t.__name__).type is t diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index 95a177b57a7d..c6203ec27559 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -180,8 +180,8 @@ def test_scalar_format(): f"failed with val {val}, type {valtype}") except ValueError as e: assert_(False, - "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % - (fmat, repr(val), repr(valtype), str(e))) + f"format raised exception (fmt='{fmat}', {val=}, " + f"type={valtype!r}, exc='{e}')") # diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 7ed6ea7687ff..80f76a865eda 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -269,7 +269,7 @@ def test_recarray_conflict_fields(self): ra.mean = [1.1, 2.2, 3.3] assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) assert_(type(ra.mean) is type(ra.var)) - ra.shape = (1, 3) + ra = ra.reshape((1, 3)) assert_(ra.shape == (1, 3)) ra.shape = ['A', 'B', 'C'] assert_array_equal(ra['shape'], [['A', 'B', 'C']]) @@ -542,3 +542,51 @@ def test_find_duplicate(): l3 = [2, 2, 1, 
4, 1, 6, 2, 3] assert_(np.rec.find_duplicate(l3) == [2, 1]) + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + dt = np.dtype([('x', 'i4'), ('y', 'f8')]) + arr = np.array([(1, 1.5), (2, 2.5), (3, 3.5)], dtype=dt).view(np.recarray) + match arr: + case [a, b, c]: + assert a.x == 1 and a.y == 1.5 + assert b.x == 2 and b.y == 2.5 + assert c.x == 3 and c.y == 3.5 + case _: + raise AssertionError("1D recarray did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + dt = np.dtype([('x', 'i4'), ('y', 'f8')]) + arr = np.array([[(1, 1.5), (2, 2.5)], [(3, 3.5), (4, 4.5)]], + dtype=dt).view(np.recarray) + match arr: + case [row1, row2]: + assert_array_equal(row1.x, [1, 2]) + assert_array_equal(row2.x, [3, 4]) + case _: + raise AssertionError("2D recarray did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + dt = np.dtype([('x', 'i4'), ('y', 'f8')]) + arr = np.array([[[(1, 1.5), (2, 2.5)], [(3, 3.5), (4, 4.5)]], + [[(5, 5.5), (6, 6.5)], [(7, 7.5), (8, 8.5)]]], + dtype=dt).view(np.recarray) + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1.x, [[1, 2], [3, 4]]) + assert_array_equal(plane2.x, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D recarray did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1.x, [1, 2]) + assert_array_equal(row2.x, [3, 4]) + assert_array_equal(row3.x, [5, 6]) + assert_array_equal(row4.x, [7, 8]) + case _: + raise AssertionError("3D recarray did not match sequence pattern") diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index dc457b2d5fc1..ef04a241dcc5 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1,5 +1,7 @@ import copy +import datetime import gc +import os import pickle import sys import tempfile @@ -17,8 
+19,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, - IS_PYPY, - IS_PYSTON, IS_WASM, _assert_valid_refcount, assert_, @@ -29,9 +29,14 @@ assert_raises, assert_raises_regex, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) +@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") class TestRegression: def test_invalid_round(self): # Ticket #3 @@ -109,7 +114,8 @@ def test_noncontiguous_fill(self): def rs(): b.shape = (10,) - assert_raises(AttributeError, rs) + with pytest.warns(DeprecationWarning): # gh-29536 + assert_raises(AttributeError, rs) def test_bool(self): # Ticket #60 @@ -461,7 +467,8 @@ def test_lexsort_zerolen_element(self): assert np.lexsort((xs,)).shape[0] == xs.shape[0] @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_pickle_py2_bytes_encoding(self): # Check that arrays and scalars pickled on Py2 are # unpickleable on Py3 using encoding='bytes' @@ -649,7 +656,8 @@ def test_reshape_zero_strides(self): def test_reshape_zero_size(self): # GitHub Issue #2700, setting shape failed for 0-sized arrays a = np.ones((0, 2)) - a.shape = (-1, 2) + with pytest.warns(DeprecationWarning): + a.shape = (-1, 2) def test_reshape_trailing_ones_strides(self): # GitHub issue gh-2949, bad strides for trailing ones of new shape @@ -1066,13 +1074,14 @@ def test_dot_alignment_sse2(self): x = np.zeros((30, 40)) for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): y = pickle.loads(pickle.dumps(x, protocol=proto)) - # y is now typically not aligned on a 8-byte boundary + # y is now typically not aligned on an 8-byte boundary z = np.ones((1, y.shape[0])) # This shouldn't cause a segmentation fault: np.dot(z, y) @pytest.mark.filterwarnings( - "ignore:.*align should be 
passed:numpy.exceptions.VisibleDeprecationWarning") + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_astype_copy(self): # Ticket #788, changeset r5155 # The test data file was generated by scipy.io.savemat. @@ -1085,18 +1094,24 @@ def test_astype_copy(self): assert_(xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0]) + @pytest.mark.filterwarnings( + "error:Implicit casting of output.*:DeprecationWarning", + ) def test_compress_small_type(self): # Ticket #789, changeset 5217. # compress with out argument segfaulted if cannot cast safely import numpy as np a = np.array([[1, 2], [3, 4]]) b = np.zeros((2, 1), dtype=np.single) + a.compress([True, False], axis=1, out=b) + assert_equal(b, np.array([[1.0], [3.0]])) try: - a.compress([True, False], axis=1, out=b) - raise AssertionError("compress with an out which cannot be " - "safely casted should not return " - "successfully") - except TypeError: + # Previously the above already failed (and that is OK) but take + # currently allows same-kind casting for the output. + a.compress([True, False], axis=1, out=np.empty((2, 1), dtype=bool)) + raise AssertionError("Expected TypeError due to unsafe out cast") + except DeprecationWarning: + # After deprecation remove TypeError the warnings filter. 
pass def test_attributes(self): @@ -1192,8 +1207,8 @@ def test_char_array_creation(self): def test_unaligned_unicode_access(self): # Ticket #825 for i in range(1, 9): - msg = 'unicode offset: %d chars' % i - t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) + msg = f'unicode offset: {i} chars' + t = np.dtype([('a', f'S{i}'), ('b', 'U2')]) x = np.array([(b'a', 'b')], dtype=t) assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) @@ -1286,15 +1301,9 @@ def test_blasdot_uninitialized_memory(self): for k in range(3): # Try to ensure that x->data contains non-zero floats x = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - x.resize((m, 0), refcheck=False) - else: - x.resize((m, 0)) + x.resize((m, 0)) y = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - y.resize((0, n), refcheck=False) - else: - y.resize((0, n)) + y.resize((0, n)) # `dot` should just return zero (m, n) matrix z = np.dot(x, y) @@ -1457,22 +1466,6 @@ def test_structured_arrays_with_objects1(self): x[x.nonzero()] = x.ravel()[:1] assert_(x[0, 1] == x[0, 0]) - @pytest.mark.skipif( - sys.version_info >= (3, 12), - reason="Python 3.12 has immortal refcounts, this test no longer works." 
- ) - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_structured_arrays_with_objects2(self): - # Ticket #1299 second test - stra = 'aaaa' - strb = 'bbbb' - numb = sys.getrefcount(strb) - numa = sys.getrefcount(stra) - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(sys.getrefcount(strb) == numb) - assert_(sys.getrefcount(stra) == numa + 2) - def test_duplicate_title_and_name(self): # Ticket #1254 dtspec = [(('a', 'a'), 'i'), ('b', 'i')] @@ -1580,8 +1573,7 @@ class Subclass(np.ndarray): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_take_refcount(self): # ticket #939 - a = np.arange(16, dtype=float) - a.shape = (4, 4) + a = np.arange(16, dtype=float).reshape((4, 4)) lut = np.ones((5 + 3, 4), float) rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) c1 = sys.getrefcount(rgba) @@ -1777,8 +1769,7 @@ def test_reduce_contiguous(self): assert_(a.flags.f_contiguous) assert_(b.flags.c_contiguous) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_object_array_self_reference(self): # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) @@ -1787,8 +1778,7 @@ def test_object_array_self_reference(self): assert_raises(RecursionError, float, a) a[()] = None - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_object_array_circular_reference(self): # Test the same for a circular reference. 
a = np.array(0, dtype=object) @@ -1863,7 +1853,7 @@ def test_ticket_1756(self): s = b'0123456789abcdef' a = np.array([s] * 5) for i in range(1, 17): - a1 = np.array(a, "|S%d" % i) + a1 = np.array(a, f"|S{i}") a2 = np.array([s[:i]] * 5) assert_equal(a1, a2) @@ -1924,13 +1914,20 @@ def test_pickle_bytes_overwrite(self): assert_equal(bytestring[0:1], '\x01'.encode('ascii')) @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_pickle_py2_array_latin1_hack(self): # Check that unpickling hacks in Py3 that support # encoding='latin1' work correctly. # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) - data = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." + data = ( + b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\n" + b"ndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\n" + b"cnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'" + b"\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." + ) # This should work: result = pickle.loads(data, encoding='latin1') assert_array_equal(result, np.array([129]).astype('b')) @@ -1938,7 +1935,8 @@ def test_pickle_py2_array_latin1_hack(self): assert_raises(Exception, pickle.loads, data, encoding='koi8-r') @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_pickle_py2_scalar_latin1_hack(self): # Check that scalar unpickling hack in Py3 that supports # encoding='latin1' work correctly. 
@@ -2012,7 +2010,6 @@ def test_assign_obj_listoflists(self): a[...] = [[1, 2]] assert_equal(a, [[1, 2], [1, 2]]) - @pytest.mark.slow_pypy def test_memoryleak(self): # Ticket #1917 - ensure that array data doesn't leak for i in range(1000): @@ -2290,8 +2287,6 @@ def test_reshape_size_overflow(self): new_shape = (2, 7, 7, 43826197) assert_raises(ValueError, a.reshape, new_shape) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_invalid_structured_dtypes(self): # gh-2865 # mapping python objects to other dtypes @@ -2321,7 +2316,12 @@ def test_correct_hash_dict(self): # gh-8887 - __hash__ would be None despite tp_hash being set all_types = set(np._core.sctypeDict.values()) - {np.void} for t in all_types: - val = t() + if t is np.timedelta64: + val = t(0, 's') + elif t is np.datetime64: + val = t('NAT', 'D') + else: + val = t() try: hash(val) @@ -2340,6 +2340,7 @@ def test_scalar_copy(self): np.bytes_: b"a", np.str_: "a", np.datetime64: "2017-08-25", + np.timedelta64: datetime.timedelta(days=1) } for sctype in scalar_types: item = sctype(values.get(sctype, 1)) @@ -2360,21 +2361,12 @@ def test_void_getitem(self): assert_(np.array([b'abc'], 'V3').astype('O') == b'abc') assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd') - def test_structarray_title(self): - # The following used to segfault on pypy, due to NPY_TITLE_KEY - # not working properly and resulting to double-decref of the - # structured array field items: - # See: https://bitbucket.org/pypy/pypy/issues/2789 - for j in range(5): - structure = np.array([1], dtype=[(('x', 'X'), np.object_)]) - structure[0]['x'] = np.array([2]) - gc.collect() - def test_dtype_scalar_squeeze(self): # gh-11384 values = { 'S': b"a", 'M': "2018-06-20", + 'm': datetime.timedelta(days=3), } for ch in np.typecodes['All']: if ch in 'O': @@ -2556,7 +2548,6 @@ def test_nonbool_logical(self): expected = np.ones(size, dtype=np.bool) 
assert_array_equal(np.logical_and(a, b), expected) - @pytest.mark.skipif(IS_PYPY, reason="PyPy issue 2742") def test_gh_23737(self): with pytest.raises(TypeError, match="not an acceptable base type"): class Y(np.flexible): @@ -2570,7 +2561,10 @@ def test_load_ufunc_pickle(self): # ufuncs are pickled with a semi-private path in # numpy.core._multiarray_umath and must be loadable without warning # despite np.core being deprecated. - test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' + test_data = ( + b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core.' + b'_multiarray_umath\x94\x8c\x03add\x94\x93\x94.' + ) result = pickle.loads(test_data, encoding='bytes') assert result is np.add diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index b993a8f3df29..3cb00dc6ab64 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -12,7 +12,7 @@ import numpy as np from numpy._core import sctypes -from numpy.testing import IS_PYPY, assert_equal, assert_raises +from numpy.testing import assert_equal, assert_raises class TestAsIntegerRatio: @@ -257,7 +257,6 @@ def test_array_wrap(scalar): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") class TestSignature: # test that scalar types have a valid __text_signature__ or __signature__ set @pytest.mark.parametrize( diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 4d2744b85e53..f467c060d6fc 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -134,7 +134,7 @@ def test_str_ucs4(self, s): v = memoryview(s) assert self._as_dict(v) == expected - # integers of the paltform-appropriate endianness + # integers of the platform-appropriate endianness code_points = np.frombuffer(v, 
dtype='i4') assert_equal(code_points, [ord(c) for c in s]) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index bfbc9a54cbfe..877ea8b8ffba 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -15,7 +15,6 @@ from numpy._utils import _pep440 from numpy.exceptions import ComplexWarning from numpy.testing import ( - IS_PYPY, _gen_alignment_data, assert_, assert_almost_equal, @@ -70,8 +69,8 @@ def test_type_add(self): # skipped ahead based on the first argument, but that # does not produce properly symmetric results... assert_equal(c_scalar.dtype, c_array.dtype, - "error with types (%d/'%c' + %d/'%c')" % - (k, np.dtype(atype).char, l, np.dtype(btype).char)) + f"error with types ({k}/{np.dtype(atype).char!r} + " + f"{l}/{np.dtype(btype).char!r})") def test_type_create(self): for atype in types: @@ -276,7 +275,7 @@ def test_modular_power(self): a = 5 b = 4 c = 10 - expected = pow(a, b, c) # noqa: F841 + expected = pow(a, b, c) for t in (np.int32, np.float32, np.complex64): # note that 3-operand power only dispatches on the first argument assert_raises(TypeError, operator.pow, t(a), b, c) @@ -398,7 +397,7 @@ def test_inplace_floordiv_handling(self): a //= b class TestComparison: - def test_comparision_different_types(self): + def test_comparison_different_types(self): x = np.array(1) y = np.array('s') eq = x == y @@ -520,14 +519,6 @@ def test_int_from_infinite_longdouble(self): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") - def test_int_from_infinite_longdouble___int__(self): - x = np.longdouble(np.inf) - assert_raises(OverflowError, x.__int__) - with pytest.warns(ComplexWarning): - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) - @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") 
@pytest.mark.skipif(platform.machine().startswith("ppc"), @@ -596,18 +587,18 @@ def test_scalar_comparison_to_none(self): assert_(not np.float32(1) == None) # noqa: E711 assert_(not np.str_('test') == None) # noqa: E711 # This is dubious (see below): - assert_(not np.datetime64('NaT') == None) # noqa: E711 + assert_(not np.datetime64('NaT', 'D') == None) # noqa: E711 assert_(np.float32(1) != None) # noqa: E711 assert_(np.str_('test') != None) # noqa: E711 # This is dubious (see below): - assert_(np.datetime64('NaT') != None) # noqa: E711 + assert_(np.datetime64('NaT', 'D') != None) # noqa: E711 assert_(len(w) == 0) # For documentation purposes, this is why the datetime is dubious. # At the time of deprecation this was no behaviour change, but # it has to be considered when the deprecations are done. - assert_(np.equal(np.datetime64('NaT'), None)) + assert_(np.equal(np.datetime64('NaT', 'D'), None)) #class TestRepr: @@ -652,18 +643,16 @@ def test_float_repr(self): self._test_type_repr(t) -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf: +class TestSizeOf: - def test_equal_nbytes(self): - for type in types: - x = type(0) - assert_(sys.getsizeof(x) > x.nbytes) + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) - def test_error(self): - d = np.float32() - assert_raises(TypeError, d.__sizeof__, "a") + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") class TestMultiply: @@ -674,8 +663,13 @@ def test_seq_repeat(self): # change. 
accepted_types = set(np.typecodes["AllInteger"]) deprecated_types = {'?'} + datetime_types = set(np.typecodes['Datetime']) forbidden_types = ( - set(np.typecodes["All"]) - accepted_types - deprecated_types) + set(np.typecodes["All"]) + - accepted_types + - deprecated_types + - datetime_types + ) forbidden_types -= {'V'} # can't default-construct void scalars for seq_type in (list, tuple): @@ -695,6 +689,11 @@ def test_seq_repeat(self): assert_raises(TypeError, operator.mul, seq, i) assert_raises(TypeError, operator.mul, i, seq) + for numpy_type in datetime_types: + i = np.dtype(numpy_type).type(1, "D") + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + def test_no_seq_repeat_basic_array_like(self): # Test that an array-like which does not know how to be multiplied # does not attempt sequence repeat (raise TypeError). @@ -1055,7 +1054,7 @@ def rop_func(self, other): # inheritance has to override, or this is correctly lost: res = op(myf_simple1(1), myf_simple2(2)) - assert type(res) == sctype or type(res) == np.bool + assert type(res) is sctype or type(res) is np.bool assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited # Two independent subclasses do not really define an order. This could @@ -1077,7 +1076,7 @@ def test_longdouble_complex(): def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): # This tests that python scalar subclasses behave like a float64 (if they # don't override it). - # In an earlier version of NEP 50, they behaved like the Python buildins. + # In an earlier version of NEP 50, they behaved like the Python builtins. def op_func(self, other): return __op__ @@ -1092,7 +1091,7 @@ def rop_func(self, other): assert op(myt(1), np.float64(2)) == __op__ assert op(np.float64(1), myt(2)) == __rop__ - if op in {operator.mod, operator.floordiv} and subtype == complex: + if op in {operator.mod, operator.floordiv} and subtype is complex: return # module is not support for complex. 
Do not test. if __rop__ == __op__: @@ -1106,12 +1105,11 @@ def rop_func(self, other): res = op(myt(1), np.float16(2)) expected = op(behaves_like(1), np.float16(2)) assert res == expected - assert type(res) == type(expected) + assert type(res) is type(expected) res = op(np.float32(2), myt(1)) expected = op(np.float32(2), behaves_like(1)) assert res == expected - assert type(res) == type(expected) - + assert type(res) is type(expected) # Same check for longdouble (compare via dtype to accept float64 when # longdouble has the identical size), which is currently not perfectly # consistent. diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 5be3d05bbf11..e8a842ba5589 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -24,7 +24,6 @@ ) from numpy.exceptions import AxisError from numpy.testing import ( - IS_PYPY, assert_, assert_array_equal, assert_equal, @@ -389,10 +388,6 @@ def test_concatenate_same_value(self): with pytest.raises(ValueError, match="^casting must be one of"): concatenate([r4, r4], casting="same_value") - @pytest.mark.skipif( - IS_PYPY, - reason="PYPY handles sq_concat, nb_add differently than cpython" - ) def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 335abc98c84e..e0979925384d 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -1227,8 +1227,8 @@ def trunc_div(a, d): continue dividend = self.load(self._data(dividend)) data_divc = [trunc_div(a, divisor) for a in dividend] - divisor_parms = self.divisor(divisor) - divc = self.divc(dividend, divisor_parms) + divisor_params = self.divisor(divisor) + divc = self.divc(dividend, divisor_params) assert divc == data_divc def test_arithmetic_reduce_sum(self): diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 492894087aa9..b3edddab0ae5 100644 --- 
a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -11,7 +11,7 @@ import numpy as np from numpy._core.tests._natype import pd_NA from numpy.dtypes import StringDType -from numpy.testing import IS_PYPY, assert_array_equal +from numpy.testing import assert_array_equal def random_unicode_string_list(): @@ -596,10 +596,7 @@ def test_concatenate(string_list): def test_resize_method(string_list): sarr = np.array(string_list, dtype="T") - if IS_PYPY: - sarr.resize(len(string_list) + 3, refcheck=False) - else: - sarr.resize(len(string_list) + 3) + sarr.resize(len(string_list) + 3) assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) @@ -1144,7 +1141,7 @@ def test_center_promoter(): np.datetime64("1923-04-14T12:43:12"), np.datetime64("1994-06-21T14:43:15"), np.datetime64("2001-10-15T04:10:32"), - np.datetime64("NaT"), + np.datetime64("NaT", "D"), np.datetime64("1995-11-25T16:02:16"), np.datetime64("2005-01-04T03:14:12"), np.datetime64("2041-12-03T14:05:03"), @@ -1155,7 +1152,7 @@ def test_center_promoter(): np.timedelta64(12358, "s"), np.timedelta64(23, "s"), np.timedelta64(74, "s"), - np.timedelta64("NaT"), + np.timedelta64("NaT", "s"), np.timedelta64(23, "s"), np.timedelta64(73, "s"), np.timedelta64(7, "s"), @@ -1206,8 +1203,8 @@ def test_nat_casts(): s = 'nat' all_nats = itertools.product(*zip(s.upper(), s.lower())) all_nats = list(map(''.join, all_nats)) - NaT_dt = np.datetime64('NaT') - NaT_td = np.timedelta64('NaT') + NaT_dt = np.datetime64('NaT', 'D') + NaT_td = np.timedelta64('NaT', 's') for na_object in [np._NoValue, None, np.nan, 'nat', '']: # numpy treats empty string and all case combinations of 'nat' as NaT dtype = StringDType(na_object=na_object) @@ -1457,7 +1454,7 @@ def test_unary(string_array, unicode_array, function_name): "strip", "lstrip", "rstrip", - "replace" + "replace", "zfill", ] diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index b36d1db76b20..a4eff7a0f7b1 
100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -5,7 +5,7 @@ import numpy as np from numpy._core._exceptions import _UFuncNoLoopError -from numpy.testing import IS_PYPY, assert_array_equal, assert_raises +from numpy.testing import assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory COMPARISONS = [ @@ -19,8 +19,6 @@ MAX = np.iinfo(np.int64).max -IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) - @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): arr_string = np.array(["a", "b"], dtype="S") @@ -194,6 +192,14 @@ def test_large_string_cast(): a.astype("U") +@pytest.mark.parametrize("dt", ["S1", "U1"]) +def test_in_place_multiply_no_overflow(dt): + # see gh-30495 + a = np.array("a", dtype=dt) + a *= 20 + assert_array_equal(a, np.array("a", dtype=dt)) + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: @@ -1123,10 +1129,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U0001D7F6', '\U00011066', '\U000104A0', - pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISALNUM", - strict=True)), + '\U0001F107', ]) def test_isalnum_unicode(self, in_, dt): in_ = np.array(in_, dtype=dt) @@ -1140,10 +1143,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', True), - pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISLOWER", - strict=True)), + ('\U00010429', True), ('\U0001044E', True), ]) def test_islower_unicode(self, in_, out, dt): @@ -1158,10 +1158,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', False), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' 
and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISUPPER", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ]) def test_isupper_unicode(self, in_, out, dt): @@ -1171,15 +1168,9 @@ def test_isupper_unicode(self, in_, out, dt): @pytest.mark.parametrize("in_,out", [ ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), - pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010401\U00010429', True), ('\U00010427\U0001044E', True), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ('\U0001F40D', False), ('\U0001F46F', False), diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 09d01eab8186..443f739f1b1e 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -17,7 +17,6 @@ from numpy.exceptions import AxisError from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, IS_WASM, assert_, assert_allclose, @@ -215,7 +214,6 @@ def test_pickle_withstring(self): b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") assert_(pickle.loads(astring) is np.cos) - @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy") def test_pickle_name_is_qualname(self): # This tests that a simplification of our ufunc pickle code will # lead to allowing qualnames as names. 
Future ufuncs should @@ -897,6 +895,19 @@ def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): expected3 = expected1.astype(object) assert_array_equal(actual3, expected3) + @pytest.mark.parametrize("func", [ + lambda A, x, **kw: np.matvec(A, x, **kw), + lambda A, x, **kw: np.vecmat(x, A, **kw), + ]) + def test_matvec_vecmat_out(self, func): + # overlapping memory: out=input should not produce zeros + a = np.arange(18, dtype=float).reshape(2, 3, 3) + b = np.arange(6, dtype=float).reshape(2, 3) + expected = func(a, b) + c = func(a, b, out=b) + assert c is b + assert_allclose(c, expected) + def test_vecdot_subclass(self): class MySubclass(np.ndarray): pass @@ -911,6 +922,13 @@ def test_vecdot_object_no_conjugate(self): with pytest.raises(AttributeError, match="conjugate"): np.vecdot(arr, arr) + def test_vecdot_object_empty_is_zero(self): + x = np.empty((0,), dtype=object) + assert np.vecdot(x, x) == 0 + + x2 = np.empty((1, 0), dtype=object) + assert_array_equal(np.vecdot(x2, x2), np.array([0], dtype=object)) + def test_vecdot_object_breaks_outer_loop_on_error(self): arr1 = np.ones((3, 3)).astype(object) arr2 = arr1.copy() @@ -1751,6 +1769,10 @@ def test_where_warns(self): # Sanity check assert np.all(result1[::2] == [0, 4, 8, 12]) assert np.all(result2[::2] == [0, 4, 8, 12]) + # Also no warning for where=True + result3 = np.add(a, a, where=True) + # Sanity check + assert_array_equal(result3, a + a) @staticmethod def identityless_reduce_arrs(): @@ -1765,7 +1787,7 @@ def identityless_reduce_arrs(): # Not contiguous and not aligned a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') a = a[1:].view(dtype='f8') - a.shape = (3, 4, 5) + a = a.reshape((3, 4, 5)) a = a[1:, 1:, 1:] yield a @@ -2261,14 +2283,14 @@ def test_cast_index_fastpath(self): np.add.at(arr, index, values) assert arr[0] == len(values) - @pytest.mark.parametrize("value", [ - np.ones(1), np.ones(()), np.float64(1.), 1.]) - def test_ufunc_at_scalar_value_fastpath(self, value): - arr = 
np.zeros(1000) - # index must be cast, which may be buffered in chunks: - index = np.repeat(np.arange(1000), 2) - np.add.at(arr, index, value) - assert_array_equal(arr, np.full_like(arr, 2 * value)) + def test_ufunc_at_scalar_value_fastpath(self): + values = [np.ones(1), np.ones(()), np.float64(1.), 1.] + for value in values: + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) def test_ufunc_at_multiD(self): a = np.arange(9).reshape(3, 3) @@ -2762,21 +2784,27 @@ def test_invalid_args(self): # minimally check the exception text assert exc.match('loop of ufunc does not support') - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + @pytest.mark.parametrize( + "nat", [np.datetime64("nat", "s"), np.timedelta64("nat", "ns")] + ) def test_nat_is_not_finite(self, nat): try: assert not np.isfinite(nat) except TypeError: pass # ok, just not implemented - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + @pytest.mark.parametrize( + "nat", [np.datetime64("nat", "s"), np.timedelta64("nat", "ns")] + ) def test_nat_is_nan(self, nat): try: assert np.isnan(nat) except TypeError: pass # ok, just not implemented - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + @pytest.mark.parametrize( + "nat", [np.datetime64("nat", "s"), np.timedelta64("nat", "ns")] + ) def test_nat_is_not_inf(self, nat): try: assert not np.isinf(nat) @@ -2842,7 +2870,14 @@ def test_ufunc_types(ufunc): if 'O' in typ or '?' 
in typ: continue inp, out = typ.split('->') - args = [np.ones((3, 3), t) for t in inp] + if 'm' in inp: + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + args = [np.ones((3, 3), t) for t in inp] + else: + args = [np.ones((3, 3), t) for t in inp] with warnings.catch_warnings(record=True): warnings.filterwarnings("always") res = ufunc(*args) @@ -2978,7 +3013,6 @@ def test_ufunc_input_floatingpoint_error(bad_offset): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "methodname", ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 40b815f88984..ff61e7f3bafc 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -17,7 +17,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_MUSL, - IS_PYPY, IS_WASM, _gen_alignment_data, assert_, @@ -217,25 +216,25 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, None, subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, out=None, subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, out=(None,), subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) d = ArrayWrap([5.7]) o1 = np.empty((1,)) @@ -245,31 +244,31 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, 
o1, None, subok=subok) if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, None, o2, subok=subok) if subok: assert_(isinstance(r1, ArrayWrap)) else: - assert_(type(r1) == np.ndarray) + assert_(type(r1) is np.ndarray) r1, r2 = np.frexp(d, out=(o1, None), subok=subok) if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, out=(None, o2), subok=subok) if subok: assert_(isinstance(r1, ArrayWrap)) else: - assert_(type(r1) == np.ndarray) + assert_(type(r1) is np.ndarray) with assert_raises(TypeError): # Out argument must be tuple, since there are multiple outputs. @@ -657,7 +656,10 @@ def test_zero_division_complex(self): def test_floor_division_complex(self): # check that floor division, divmod and remainder raises type errors - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128) + x = np.array( + [.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. 
* 1j], + dtype=np.complex128, + ) with pytest.raises(TypeError): x // 7 with pytest.raises(TypeError): @@ -704,7 +706,11 @@ def test_floor_division_corner_cases(self, dtype): fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) + warnings.filterwarnings( + 'ignore', + "invalid value encountered in floor_divide", + RuntimeWarning, + ) div = np.floor_divide(fnan, fone) assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) @@ -860,8 +866,16 @@ def test_float_divmod_corner_cases(self): fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) - warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) + warnings.filterwarnings( + "ignore", + "invalid value encountered in divmod", + RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + "divide by zero encountered in divmod", + RuntimeWarning, + ) div, rem = np.divmod(fone, fzer) assert np.isinf(div), f'dt: {dt}, div: {rem}' assert np.isnan(rem), f'dt: {dt}, rem: {rem}' @@ -899,8 +913,16 @@ def test_float_remainder_corner_cases(self): # Check nans, inf with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) - warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) + warnings.filterwarnings( + "ignore", + "invalid value encountered in remainder", + RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + "invalid value encountered in fmod", + RuntimeWarning, + ) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -1121,15 +1143,36 @@ def test_power_complex(self): assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3]) 
assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4]) assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)]) - assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2]) - assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197, - (-117 - 44j) / 15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j), - ncu.sqrt(3 + 4j)]) + assert_almost_equal( + x**(-2), + [1 / (1 + 2j)**2, + 1 / (2 + 3j)**2, + 1 / (3 + 4j)**2], + ) + assert_almost_equal( + x**(-3), + [(-11 + 2j) / 125, + (-46 - 9j) / 2197, + (-117 - 44j) / 15625], + ) + assert_almost_equal( + x**(0.5), + [ncu.sqrt(1 + 2j), + ncu.sqrt(2 + 3j), + ncu.sqrt(3 + 4j)], + ) norm = 1. / ((x**14)[0]) - assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j, - 5583548873 + 2465133864j]]) + assert_almost_equal( + x**14 * norm, + [ + i * norm + for i in [ + -76443 + 16124j, + 23161315 + 58317492j, + 5583548873 + 2465133864j, + ] + ], + ) # Ticket #836 def assert_complex_equal(x, y): @@ -1297,7 +1340,7 @@ def test_log2_ints(self, i): # a good log2 implementation should provide this, # might fail on OS with bad libm v = np.log2(2.**i) - assert_equal(v, float(i), err_msg='at exponent %d' % i) + assert_equal(v, float(i), err_msg=f'at exponent {i}') @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_log2_special(self): @@ -1413,8 +1456,16 @@ def test_log_strides(self): y_true = np.log(x_f64) y_special = np.log(x_special) for jj in strides: - assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + assert_array_almost_equal_nulp( + np.log(x_f64[::jj]), + y_true[::jj], + nulp=2, + ) + assert_array_almost_equal_nulp( + np.log(x_special[::jj]), + y_special[::jj], + nulp=2, + ) # Reference values were computed with mpmath, with mp.dps = 200. 
@pytest.mark.parametrize( @@ -1464,7 +1515,11 @@ def test_exp_strides(self): x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) y_true = np.exp(x_f64) for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp( + np.exp(x_f64[::jj]), + y_true[::jj], + nulp=2, + ) class TestSpecialFloats: def test_exp_values(self): @@ -1904,15 +1959,53 @@ def test_divide_spurious_fpexception(self, dtype): np.zeros(128 + 1, dtype=dt) / subnorm class TestFPClass: - @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1, - 2, 4, 5, 6, 7, 8, 9, 10]) + @pytest.mark.parametrize( + "stride", + [-5, -4, -3, -2, -1, 1, 2, 4, 5, 6, 7, 8, 9, 10], + ) def test_fpclass(self, stride): - arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') - arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') - nan = np.array([True, True, False, False, False, False, False, False, False, False]) # noqa: E221 - inf = np.array([False, False, True, True, False, False, False, False, False, False]) # noqa: E221 - sign = np.array([False, True, False, True, True, False, True, False, False, True]) # noqa: E221 - finite = np.array([False, False, False, False, True, True, True, True, True, True]) # noqa: E221 + arr_f64 = np.array( + [ + np.nan, + -np.nan, + np.inf, + -np.inf, + -1.0, + 1.0, + -0.0, + 0.0, + 2.2251e-308, + -2.2251e-308, + ], + dtype="d", + ) + arr_f32 = np.array( + [ + np.nan, + -np.nan, + np.inf, + -np.inf, + -1.0, + 1.0, + -0.0, + 0.0, + 1.4013e-045, + -1.4013e-045, + ], + dtype="f", + ) + nan = np.array( + [True, True, False, False, False, False, False, False, False, False], + ) + inf = np.array( + [False, False, True, True, False, False, False, False, False, False], + ) + sign = np.array( + [False, True, False, True, True, False, True, False, False, True], + ) + finite = np.array( + [False, 
False, False, False, True, True, True, True, True, True], + ) assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) @@ -2007,18 +2100,29 @@ def test_ldexp(self, dtype, stride): mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') out = np.zeros(8, dtype=dtype) - assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal( + np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), + np.ones(8, dtype=dtype)[::stride], + ) assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) class TestFRExp: @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("dtype", ['f', 'd']) - @pytest.mark.skipif(not sys.platform.startswith('linux'), - reason="np.frexp gives different answers for NAN/INF on windows and linux") + @pytest.mark.skipif( + not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux", + ) @pytest.mark.xfail(IS_MUSL, reason="gh23049") def test_frexp(self, dtype, stride): - arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) - mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) + arr = np.array( + [np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], + dtype=dtype, + ) + mant_true = np.array( + [np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], + dtype=dtype, + ) exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') out_mant = np.ones(8, dtype=dtype) out_exp = 2 * np.ones(8, dtype='i') @@ -2101,8 +2205,16 @@ def test_sincos_float32(self): assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) # test aliasing(issue #17761) tx_f32 = x_f32.copy() - assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) 
- assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + assert_array_max_ulp( + np.sin(x_f32, out=x_f32), + np.float32(np.sin(x_f64)), + maxulp=2, + ) + assert_array_max_ulp( + np.cos(tx_f32, out=tx_f32), + np.float32(np.cos(x_f64)), + maxulp=2, + ) def test_strided_float32(self): np.random.seed(42) @@ -2117,10 +2229,12 @@ def test_strided_float32(self): sin_true = np.sin(x_f32_large) cos_true = np.cos(x_f32_large) for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + x_slice = x_f32[::jj] + x_large = x_f32_large[::jj] + assert_array_almost_equal_nulp(np.exp(x_slice), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_slice), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_large), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_large), cos_true[::jj], nulp=2) class TestLogAddExp(_FilterInvalids): def test_logaddexp_values(self): @@ -2242,24 +2356,38 @@ def test_no_fpe(self): def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan") + assert_( + np.isnan(ncu.arctan2(x, y)), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan", + ) def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf") + assert_( + (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf", + ) def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf") + assert_( + (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), + 
f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf", + ) def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0") + assert_( + (ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0", + ) def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0") - + assert_( + (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0", + ) class TestArctan2SpecialValues: def test_one_one(self): @@ -2423,9 +2551,15 @@ def test_strided_array(self): out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) assert_equal(np.maximum(arr1, arr2), maxtrue) assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) - assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal( + np.maximum(arr1[:4:], arr2[::2]), + np.array([-2.0, np.nan, 10.0, 1.0]), + ) assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) - assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal( + np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), + np.array([-2.0, 10.0, np.nan]), + ) assert_equal(out, out_maxtrue) def test_precision(self): @@ -2515,9 +2649,15 @@ def test_strided_array(self): out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) assert_equal(np.minimum(arr1, arr2), mintrue) assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) - assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal( + np.minimum(arr1[:4:], arr2[::2]), + np.array([-4.0, np.nan, 0.0, 0.0]), + ) assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) - assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), 
np.array([-4.0, 1.0, np.nan])) + assert_equal( + np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), + np.array([-4.0, 1.0, np.nan]), + ) assert_equal(out, out_mintrue) def test_precision(self): @@ -3513,6 +3653,19 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): 'axis': 'axis0', 'initial': 'init0', 'where': 'where0'}) + # reduce, kwargs, out=None is removed + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out=None, + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) # reduce, output equal to None removed, but not other explicit ones, # even if they are at their default value. @@ -3702,7 +3855,7 @@ def _unwrap(self, objs): for obj in objs: if isinstance(obj, cls): obj = np.array(obj) - elif type(obj) != np.ndarray: + elif type(obj) is not np.ndarray: return NotImplemented result.append(obj) return result @@ -4048,13 +4201,10 @@ def test_array_ufunc_direct_call(self): assert_array_equal(res, a + a) @pytest.mark.thread_unsafe(reason="modifies global module") - @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails") def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" - expected_dict = ( - {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} - ) + expected_dict = {"__module__": "numpy", "__qualname__": "add"} expected_dict["__signature__"] = inspect.signature(np.add) np.add.__doc__ = new_doc @@ -4313,7 +4463,15 @@ def test_branch_cuts_complex64(self): _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) - _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut( + np.arcsinh, + [0 - 2j, 2j], 
+ [1, 1], + -1, + 1, + True, + np.complex64, + ) _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) @@ -4322,9 +4480,33 @@ def test_branch_cuts_complex64(self): _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut( + np.arcsinh, + [-2, 2, 0], + [1j, 1j, 1], + 1, + 1, + False, + np.complex64, + ) + _check_branch_cut( + np.arccosh, + [0 - 2j, 2j, 2], + [1, 1, 1j], + 1, + 1, + False, + np.complex64, + ) + _check_branch_cut( + np.arctanh, + [0 - 2j, 2j, 0], + [1, 1, 1j], + 1, + 1, + False, + np.complex64, + ) def test_against_cmath(self): import cmath @@ -4482,8 +4664,8 @@ class TestSubclass: def test_subclass_op(self): class simple(np.ndarray): - def __new__(subtype, shape): - self = np.ndarray.__new__(subtype, shape, dtype=object) + def __new__(cls, shape): + self = np.ndarray.__new__(cls, shape, dtype=object) self.fill(0) return self @@ -4512,7 +4694,10 @@ def mul(a, b): # with no identity (not reorderable) mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) - assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises( + ValueError, + lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), + ) assert_raises(ValueError, lambda: mul_ufunc.reduce([])) @@ -4567,15 +4752,28 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, x = x0[jr] x.real = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), (y0[jr], ym)) - assert_(np.all(np.absolute(y0[jr].imag - 
ym.imag * im_sign) < atol), (y0[jr], ym)) + assert_( + np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), + (y0[jr], ym), + ) + assert_( + np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), + (y0[jr], ym), + ) if np.any(ji): x = x0[ji] x.imag = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), (y0[ji], ym)) - assert_(np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), (y0[ji], ym)) + assert_( + np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), + (y0[ji], ym), + ) + assert_( + np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), + (y0[ji], ym), + ) + def test_copysign(): assert_(np.copysign(1, -1) == -1) @@ -4842,18 +5040,18 @@ class BadArr1(np.ndarray): def __array_finalize__(self, obj): # The outer call reshapes to 3 dims, try to do a bad reshape. if self.ndim == 3: - self.shape = self.shape + (1,) + self._set_shape(self.shape + (1,)) class BadArr2(np.ndarray): def __array_finalize__(self, obj): if isinstance(obj, BadArr2): # outer inserts 1-sized dims. In that case disturb them. if self.shape[-1] == 1: - self.shape = self.shape[::-1] + self._set_shape(self.shape[::-1]) for cls in [BadArr1, BadArr2]: arr = np.ones((2, 3)).view(cls) - with assert_raises(TypeError) as a: + with pytest.raises(TypeError): # The first array gets reshaped (not the second one) np.add.outer(arr, [1, 2]) @@ -4913,9 +5111,28 @@ def test_bad_legacy_gufunc_silent_errors(x1): ncu_tests.always_error_gufunc(x1, 0.0) +class TestReplaceLoopBySignature: + """Tests for PyUFunc_ReplaceLoopBySignature C API.""" + + @pytest.mark.thread_unsafe(reason="modifies ufunc within test") + def test_replace_loop(self): + # Call the ufunc first to populate any internal dispatch caches, + # then replace the float64 loop with one that outputs 42.0, + # verify the replacement is used, and restore the original. 
+ a = np.array([1.0, 2.0, 3.0]) + assert_array_equal(np.negative(a), [-1.0, -2.0, -3.0]) + + saved = ncu_tests.replace_loop(np.negative) + try: + assert_array_equal(np.negative(a), [42.0, 42.0, 42.0]) + finally: + ncu_tests.restore_loop(np.negative, saved) + + assert_array_equal(np.negative(a), [-1.0, -2.0, -3.0]) + + class TestAddDocstring: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_same_docstring(self): # test for attributes (which are C-level defined) ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) @@ -4942,16 +5159,6 @@ def func(): ncu.add_docstring(func, "different docstring") -class TestAdd_newdoc_ufunc: - @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") - def test_ufunc_arg(self): - assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") - assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") - - @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") - def test_string_arg(self): - assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) - class TestHypotErrorMessages: def test_hypot_error_message_single_arg(self): with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 3ca2f508672e..cfed5a40931e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -21,8 +21,6 @@ IS_AVX = __cpu_features__.get('AVX512F', False) or \ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) -IS_AVX512FP16 = __cpu_features__.get('AVX512FP16', False) - # only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). 
runtest = (sys.platform.startswith('linux') and IS_AVX and not _glibc_older_than("2.17")) @@ -84,8 +82,6 @@ def test_validate_transcendentals(self): maxulperr = data_subset['ulperr'].max() assert_array_max_ulp(npfunc(inval), outval, maxulperr) - @pytest.mark.skipif(IS_AVX512FP16, - reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): @@ -94,39 +90,3 @@ def test_validate_fp16_transcendentals(self, ufunc): datafp32 = datafp16.astype(np.float32) assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), maxulp=1, dtype=np.float16) - - @pytest.mark.skipif(not IS_AVX512FP16, - reason="lower ULP only apply for SVML FP16") - def test_validate_svml_fp16(self): - max_ulp_err = { - "arccos": 2.54, - "arccosh": 2.09, - "arcsin": 3.06, - "arcsinh": 1.51, - "arctan": 2.61, - "arctanh": 1.88, - "cbrt": 1.57, - "cos": 1.43, - "cosh": 1.33, - "exp2": 1.33, - "exp": 1.27, - "expm1": 0.53, - "log": 1.80, - "log10": 1.27, - "log1p": 1.88, - "log2": 1.80, - "sin": 1.88, - "sinh": 2.05, - "tan": 2.26, - "tanh": 3.00, - } - - with np.errstate(all='ignore'): - arr = np.arange(65536, dtype=np.int16) - datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) - datafp32 = datafp16.astype(np.float32) - for func in max_ulp_err: - ufunc = getattr(np, func) - ulp = np.ceil(max_ulp_err[func]) - assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), - maxulp=ulp, dtype=np.float16) diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 7012e7e357fe..d998bf64ff71 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -389,7 +389,7 @@ def test_scalar(self): n_r = [x[i] ** y[i] for i in lx] for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + assert_almost_equal(n_r[i], p_r[i], err_msg=f'Loop {i}\n') def test_array(self): x = np.array([1, 1j, 2, 2.5 + 
.37j, np.inf, np.nan]) @@ -409,7 +409,7 @@ def test_array(self): n_r = x ** y for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + assert_almost_equal(n_r[i], p_r[i], err_msg=f'Loop {i}\n') class TestCabs: def setup_method(self): @@ -610,15 +610,15 @@ def test_array(self, stride, astype, func): ('cont_chisq', '= (3, 12): - from typing import TypeAliasType - - from ._array_like import ArrayLike as _ArrayLikeAlias - from ._dtype_like import DTypeLike as _DTypeLikeAlias - - ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) - DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) - -else: - from ._array_like import ArrayLike as ArrayLike - from ._dtype_like import DTypeLike as DTypeLike diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 5330a6b3b715..883b890a1a16 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -137,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] + NDArray >>> print(npt.NDArray[np.float64]) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] + NDArray[numpy.float64] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 6b071f4a0319..5c249775f810 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,27 +1,18 @@ -import sys -from collections.abc import Callable, Collection, Sequence -from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable +from collections.abc import Buffer, Callable, Collection +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable import numpy as np -from numpy import dtype - -from ._nbit_base import _32Bit, _64Bit -from ._nested_sequence import _NestedSequence -from ._shape import _AnyShape if TYPE_CHECKING: - StringDType = 
np.dtypes.StringDType + from numpy.dtypes import StringDType else: - # at runtime outside of type checking importing this from numpy.dtypes - # would lead to a circular import from numpy._core.multiarray import StringDType -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) -_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape -NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] +type NDArray[ScalarT: np.generic] = np.ndarray[_AnyShape, np.dtype[ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -29,8 +20,8 @@ # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads @runtime_checkable -class _SupportsArray(Protocol[_DTypeT_co]): - def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... +class _SupportsArray[DTypeT: np.dtype](Protocol): + def __array__(self) -> np.ndarray[Any, DTypeT]: ... @runtime_checkable @@ -45,62 +36,57 @@ def __array_function__( ) -> object: ... -# TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence: TypeAlias = ( - _T - | Sequence[_T] - | Sequence[Sequence[_T]] - | Sequence[Sequence[Sequence[_T]]] - | Sequence[Sequence[Sequence[Sequence[_T]]]] -) - # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike: TypeAlias = ( - _SupportsArray[dtype[_ScalarT]] - | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] +type _ArrayLike[ScalarT: np.generic] = ( + _SupportsArray[np.dtype[ScalarT]] + | _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] ) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. 
`np.dtype` # and another one for the rest -_DualArrayLike: TypeAlias = ( - _SupportsArray[_DTypeT] - | _NestedSequence[_SupportsArray[_DTypeT]] - | _T - | _NestedSequence[_T] +type _DualArrayLike[DTypeT: np.dtype, BuiltinT] = ( + _SupportsArray[DTypeT] + | _NestedSequence[_SupportsArray[DTypeT]] + | BuiltinT + | _NestedSequence[BuiltinT] ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _Buffer -else: - @runtime_checkable - class _Buffer(Protocol): - def __buffer__(self, flags: int, /) -> memoryview: ... - -ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str] +type ArrayLike = Buffer | _DualArrayLike[np.dtype, complex | bytes | str] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool] -_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool] -_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int] -_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float] -_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex] -_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co -_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int] -_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64] -_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_] - -_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void] -_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes] -_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str] -_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str] -_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str] - -__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool -__Complex128_co: TypeAlias = np.number[_64Bit] 
| np.number[_32Bit] | np.float16 | np.integer | np.bool -_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float] -_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex] +type _ArrayLikeBool_co = _DualArrayLike[np.dtype[np.bool], bool] +type _ArrayLikeUInt_co = _DualArrayLike[np.dtype[np.bool | np.unsignedinteger], bool] +type _ArrayLikeInt_co = _DualArrayLike[np.dtype[np.bool | np.integer], int] +type _ArrayLikeFloat_co = _DualArrayLike[ + np.dtype[np.bool | np.integer | np.floating], + float, +] +type _ArrayLikeComplex_co = _DualArrayLike[np.dtype[np.bool | np.number], complex] +type _ArrayLikeNumber_co = _ArrayLikeComplex_co +type _ArrayLikeTD64_co = _DualArrayLike[ + np.dtype[np.bool | np.integer | np.timedelta64], + int, +] +type _ArrayLikeDT64_co = _ArrayLike[np.datetime64] +type _ArrayLikeObject_co = _ArrayLike[np.object_] + +type _ArrayLikeVoid_co = _ArrayLike[np.void] +type _ArrayLikeBytes_co = _DualArrayLike[np.dtype[np.bytes_], bytes] +type _ArrayLikeStr_co = _DualArrayLike[np.dtype[np.str_], str] +type _ArrayLikeString_co = _DualArrayLike[StringDType, str] +type _ArrayLikeAnyString_co = _DualArrayLike[ + np.dtype[np.character] | StringDType, + bytes | str, +] + +type __Float64_co = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +type __Complex128_co = ( + np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +) +type _ArrayLikeFloat64_co = _DualArrayLike[np.dtype[__Float64_co], float] +type _ArrayLikeComplex128_co = _DualArrayLike[np.dtype[__Complex128_co], complex] # NOTE: This includes `builtins.bool`, but not `numpy.bool`. 
-_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int] +type _ArrayLikeInt = _DualArrayLike[np.dtype[np.integer], int] diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index 7b6fad228d56..518f9b473e4a 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,152 +1,104 @@ from typing import Literal -_BoolCodes = Literal[ - "bool", "bool_", - "?", "|?", "=?", "?", - "b1", "|b1", "=b1", "b1", -] # fmt: skip +type _BoolCodes = Literal["bool", "bool_", "?", "b1", "|b1", "=b1", "b1"] + +type _Int8Codes = Literal["int8", "byte", "b", "i1", "|i1", "=i1", "i1"] +type _Int16Codes = Literal["int16", "short", "h", "i2", "|i2", "=i2", "i2"] +type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] +type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] + +type _UInt8Codes = Literal["uint8", "ubyte", "B", "u1", "|u1", "=u1", "u1"] +type _UInt16Codes = Literal["uint16", "ushort", "H", "u2", "|u2", "=u2", "u2"] +type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] + +type _IntCCodes = Literal["intc", "i", "|i", "=i", "i"] +type _LongCodes = Literal["long", "l", "|l", "=l", "l"] +type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] +type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] + +type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] +type _ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] +type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] +type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] + +type _Float16Codes = Literal["float16", "half", "e", "f2", "|f2", "=f2", "f2"] +type _Float32Codes = Literal["float32", "single", "f", "f4", "|f4", "=f4", "f4"] +type _Float64Codes = Literal[ + "float64", "float", "double", "d", "f8", "|f8", "=f8", "f8" +] -_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] -_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] 
-_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] -_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] - -_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] -_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] -_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] -_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] - -_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "f2"] -_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] -_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] - -_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] -_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] - -_ByteCodes = Literal["byte", "b", "|b", "=b", "b"] -_ShortCodes = Literal["short", "h", "|h", "=h", "h"] -_IntCCodes = Literal["intc", "i", "|i", "=i", "i"] -_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] -_LongCodes = Literal["long", "l", "|l", "=l", "l"] -_IntCodes = _IntPCodes -_LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] - -_UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] -_UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] -_UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] -_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] -_ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] -_UIntCodes = _UIntPCodes -_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] - -_HalfCodes = Literal["half", "e", "|e", "=e", "e"] -_SingleCodes = Literal["single", "f", "|f", "=f", "f"] -_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] -_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] - -_CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] -_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] -_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] - -_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] -_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] -_VoidCodes = 
Literal["void", "V", "|V", "=V", "V"] -_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] - -_DT64Codes = Literal[ - "datetime64", "|datetime64", "=datetime64", - "datetime64", - "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", - "datetime64[Y]", - "datetime64[M]", "|datetime64[M]", "=datetime64[M]", - "datetime64[M]", - "datetime64[W]", "|datetime64[W]", "=datetime64[W]", - "datetime64[W]", - "datetime64[D]", "|datetime64[D]", "=datetime64[D]", - "datetime64[D]", - "datetime64[h]", "|datetime64[h]", "=datetime64[h]", - "datetime64[h]", - "datetime64[m]", "|datetime64[m]", "=datetime64[m]", - "datetime64[m]", - "datetime64[s]", "|datetime64[s]", "=datetime64[s]", - "datetime64[s]", - "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", - "datetime64[ms]", - "datetime64[us]", "|datetime64[us]", "=datetime64[us]", - "datetime64[us]", - "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", - "datetime64[ns]", - "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", - "datetime64[ps]", - "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", - "datetime64[fs]", - "datetime64[as]", "|datetime64[as]", "=datetime64[as]", - "datetime64[as]", - "M", "|M", "=M", "M", - "M8", "|M8", "=M8", "M8", - "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", - "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", - "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", - "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", - "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", - "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", - "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", - "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", - "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", - "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", - "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", - "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", - "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] + +type _Complex64Codes = Literal[ + "complex64", "csingle", "F", "c8", "|c8", "=c8", "c8" ] -_TD64Codes = Literal[ - "timedelta64", "|timedelta64", 
"=timedelta64", - "timedelta64", - "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", - "timedelta64[Y]", - "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", - "timedelta64[M]", - "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", - "timedelta64[W]", - "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", - "timedelta64[D]", - "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", - "timedelta64[h]", - "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", - "timedelta64[m]", - "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]", - "timedelta64[s]", - "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", - "timedelta64[ms]", - "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", - "timedelta64[us]", - "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", - "timedelta64[ns]", - "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", - "timedelta64[ps]", - "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", - "timedelta64[fs]", - "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", - "timedelta64[as]", - "m", "|m", "=m", "m", - "m8", "|m8", "=m8", "m8", - "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", - "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", - "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", - "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", - "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", - "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", - "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", - "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", - "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", - "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", - "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", - "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", - "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", + +type _Complex128Codes = Literal[ + "complex128", "complex", "cdouble", "D", "c16", "|c16", "=c16", "c16" ] +type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] + +type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] +type _BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] +type 
_VoidCodes = Literal["void", "V", "|V", "=V", "V"] +type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] + +# datetime64 +type _DT64Codes_any = Literal["datetime64", "M", "M8", "|M8", "=M8", "M8"] +type _DT64Codes_date = Literal[ + "datetime64[Y]", "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "datetime64[M]", "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "datetime64[W]", "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "datetime64[D]", "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", +] # fmt: skip +type _DT64Codes_datetime = Literal[ + "datetime64[h]", "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", + "datetime64[m]", "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "datetime64[s]", "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "datetime64[ms]", "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "datetime64[us]", "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "datetime64[Îŧs]", "M8[Îŧs]", "|M8[Îŧs]", "=M8[Îŧs]", "M8[Îŧs]", +] # fmt: skip +type _DT64Codes_int = Literal[ + "datetime64[ns]", "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "datetime64[ps]", "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "datetime64[fs]", "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "datetime64[as]", "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +] # fmt: skip +type _DT64Codes = Literal[ + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, +] + +# timedelta64 +type _TD64Codes_any = Literal["timedelta64", "m", "m8", "|m8", "=m8", "m8"] +type _TD64Codes_int = Literal[ + "timedelta64[Y]", "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "timedelta64[M]", "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "timedelta64[ns]", "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "timedelta64[ps]", "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "timedelta64[fs]", "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "timedelta64[as]", "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", +] # fmt: skip +type _TD64Codes_timedelta = Literal[ + "timedelta64[W]", "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "timedelta64[D]", "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "timedelta64[h]", 
"m8[h]", "|m8[h]", "=m8[h]", "m8[h]", + "timedelta64[m]", "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "timedelta64[s]", "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "timedelta64[ms]", "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "timedelta64[us]", "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "timedelta64[Îŧs]", "m8[Îŧs]", "|m8[Îŧs]", "=m8[Îŧs]", "m8[Îŧs]", +] # fmt: skip +type _TD64Codes = Literal[_TD64Codes_any, _TD64Codes_int, _TD64Codes_timedelta] + # NOTE: `StringDType' has no scalar type, and therefore has no name that can # be passed to the `dtype` constructor -_StringCodes = Literal["T", "|T", "=T", "T"] +type _StringCodes = Literal["T", "|T", "=T", "T"] # NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't # the case for a `Union` of `Literal`s. @@ -154,54 +106,45 @@ # Another advantage of nesting, is that they always have a "flat" # `Literal.__args__`, which is a tuple of *literally* all its literal values. -_UnsignedIntegerCodes = Literal[ - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, -] -_SignedIntegerCodes = Literal[ +type _SignedIntegerCodes = Literal[ _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, - _ByteCodes, - _ShortCodes, _IntCCodes, _LongCodes, _LongLongCodes, + _IntPCodes, +] +type _UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UIntPCodes, ] -_FloatingCodes = Literal[ +type _FloatingCodes = Literal[ _Float16Codes, _Float32Codes, _Float64Codes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes + _LongDoubleCodes, ] -_ComplexFloatingCodes = Literal[ +type _ComplexFloatingCodes = Literal[ _Complex64Codes, _Complex128Codes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, ] -_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] -_InexactCodes = Literal[_FloatingCodes, 
_ComplexFloatingCodes] -_NumberCodes = Literal[_IntegerCodes, _InexactCodes] +type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +type _NumberCodes = Literal[_IntegerCodes, _InexactCodes] -_CharacterCodes = Literal[_StrCodes, _BytesCodes] -_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] +type _CharacterCodes = Literal[_BytesCodes, _StrCodes] +type _FlexibleCodes = Literal[_CharacterCodes, _VoidCodes] -_GenericCodes = Literal[ +type _GenericCodes = Literal[ _BoolCodes, _NumberCodes, _FlexibleCodes, diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 34c4bd44f519..09ed1a0084de 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,5 +1,5 @@ -from collections.abc import Sequence # noqa: F811 -from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar +from collections.abc import Sequence +from typing import Any, NotRequired, Protocol, TypedDict, runtime_checkable import numpy as np @@ -18,48 +18,43 @@ _VoidCodes, ) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) +type _DTypeLikeNested = Any # TODO: wait for support for recursive types -_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types - -# Mandatory keys -class _DTypeDictBase(TypedDict): +class _DTypeDict(TypedDict): names: Sequence[str] formats: Sequence[_DTypeLikeNested] - - -# Mandatory + optional keys -class _DTypeDict(_DTypeDictBase, total=False): # Only `str` elements are usable as indexing aliases, # but `titles` can in principle accept any object - offsets: Sequence[int] - titles: Sequence[Any] - itemsize: int - aligned: bool + offsets: NotRequired[Sequence[int]] + titles: NotRequired[Sequence[Any]] + itemsize: NotRequired[int] + aligned: NotRequired[bool] -class _HasDType(Protocol[_DTypeT_co]): +# A protocol for 
anything with the dtype attribute +@runtime_checkable +class _HasDType[DTypeT: np.dtype](Protocol): @property - def dtype(self) -> _DTypeT_co: ... + def dtype(self) -> DTypeT: ... -class _HasNumPyDType(Protocol[_DTypeT_co]): +class _HasNumPyDType[DTypeT: np.dtype](Protocol): @property - def __numpy_dtype__(self, /) -> _DTypeT_co: ... + def __numpy_dtype__(self, /) -> DTypeT: ... -_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] +type _SupportsDType[DTypeT: np.dtype] = _HasDType[DTypeT] | _HasNumPyDType[DTypeT] # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +type _DTypeLike[ScalarT: np.generic] = ( + type[ScalarT] | np.dtype[ScalarT] | _SupportsDType[np.dtype[ScalarT]] +) # Would create a dtype[np.void] -_VoidDTypeLike: TypeAlias = ( +type _VoidDTypeLike = ( # If a tuple, then it can be either: # - (flexible_dtype, itemsize) # - (fixed_dtype, shape) @@ -80,31 +75,29 @@ def __numpy_dtype__(self, /) -> _DTypeT_co: ... # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. 
-_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes -_DTypeLikeInt: TypeAlias = ( - type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes -) -_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes -_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes -_DTypeLikeComplex: TypeAlias = ( +type _DTypeLikeBool = type[bool] | _DTypeLike[np.bool] | _BoolCodes +type _DTypeLikeInt = type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +type _DTypeLikeUInt = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +type _DTypeLikeFloat = type[float] | _DTypeLike[np.floating] | _FloatingCodes +type _DTypeLikeComplex = ( type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes ) -_DTypeLikeComplex_co: TypeAlias = ( +type _DTypeLikeComplex_co = ( type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes ) -_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes -_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes -_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes -_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes -_DTypeLikeVoid: TypeAlias = ( +type _DTypeLikeDT64 = _DTypeLike[np.timedelta64] | _TD64Codes +type _DTypeLikeTD64 = _DTypeLike[np.datetime64] | _DT64Codes +type _DTypeLikeBytes = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +type _DTypeLikeStr = type[str] | _DTypeLike[np.str_] | _StrCodes +type _DTypeLikeVoid = ( type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes ) -_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes +type _DTypeLikeObject = type[object] | _DTypeLike[np.object_] | _ObjectCodes # Anything that can be coerced into numpy.dtype. 
# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str +type DTypeLike = type | str | np.dtype | _SupportsDType[np.dtype] | _VoidDTypeLike # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 60bce3245c7a..1ad5f017eeb9 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,19 +1,17 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" -from typing import TypeAlias - from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin -_NBitByte: TypeAlias = _8Bit -_NBitShort: TypeAlias = _16Bit -_NBitIntC: TypeAlias = _32Bit -_NBitIntP: TypeAlias = _32Bit | _64Bit -_NBitInt: TypeAlias = _NBitIntP -_NBitLong: TypeAlias = _32Bit | _64Bit -_NBitLongLong: TypeAlias = _64Bit +type _NBitByte = _8Bit +type _NBitShort = _16Bit +type _NBitIntC = _32Bit +type _NBitIntP = _32Bit | _64Bit +type _NBitInt = _NBitIntP +type _NBitLong = _32Bit | _64Bit +type _NBitLongLong = _64Bit -_NBitHalf: TypeAlias = _16Bit -_NBitSingle: TypeAlias = _32Bit -_NBitDouble: TypeAlias = _64Bit -_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit +type _NBitHalf = _16Bit +type _NBitSingle = _32Bit +type _NBitDouble = _64Bit +type _NBitLongDouble = _64Bit | _96Bit | _128Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index 28d3e63c1769..28a60ecbe00f 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -30,14 +30,13 @@ class NBitBase: .. 
code-block:: python - >>> from typing import TypeVar, TYPE_CHECKING + >>> from typing import TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt - >>> S = TypeVar("S", bound=npt.NBitBase) - >>> T = TypeVar("T", bound=npt.NBitBase) - - >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + >>> def add[S: npt.NBitBase, T: npt.NBitBase]( + ... a: np.floating[S], b: np.integer[T] + ... ) -> np.floating[S | T]: ... return a + b >>> a = np.float16() diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index d88c9f4d9fd9..bd317c896094 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -8,14 +8,14 @@ from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 @deprecated( "`NBitBase` is deprecated and will be removed from numpy.typing in the " - "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "future. Use `@typing.overload` or a type parameter with a scalar-type as upper " "bound, instead. (deprecated in NumPy 2.3)", ) @final class NBitBase: ... @final -class _256Bit(NBitBase): ... +class _256Bit(NBitBase): ... # type: ignore[deprecated] @final class _128Bit(_256Bit): ... diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index e3362a9f21fe..6755c2ec0ec9 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -1,23 +1,21 @@ """A module containing the `_NestedSequence` protocol.""" -from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable if TYPE_CHECKING: from collections.abc import Iterator __all__ = ["_NestedSequence"] -_T_co = TypeVar("_T_co", covariant=True) - @runtime_checkable -class _NestedSequence(Protocol[_T_co]): +class _NestedSequence[T](Protocol): """A protocol for representing nested sequences. 
Warning ------- `_NestedSequence` currently does not work in combination with typevars, - *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``. + *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``. See Also -------- @@ -54,7 +52,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": + def __getitem__(self, index: int, /) -> "T | _NestedSequence[T]": """Implement ``self[x]``.""" raise NotImplementedError @@ -62,11 +60,11 @@ def __contains__(self, x: object, /) -> bool: """Implement ``x in self``.""" raise NotImplementedError - def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + def __iter__(self, /) -> "Iterator[T | _NestedSequence[T]]": """Implement ``iter(self)``.""" raise NotImplementedError - def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + def __reversed__(self, /) -> "Iterator[T | _NestedSequence[T]]": """Implement ``reversed(self)``.""" raise NotImplementedError diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py index b0de66d89aa1..2d36c4961c42 100644 --- a/numpy/_typing/_scalars.py +++ b/numpy/_typing/_scalars.py @@ -1,20 +1,20 @@ -from typing import Any, TypeAlias +from typing import Any import numpy as np # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart -_CharLike_co: TypeAlias = str | bytes +type _CharLike_co = str | bytes # The `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike_co: TypeAlias = bool | np.bool -_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool -_IntLike_co: TypeAlias = int | np.integer | np.bool -_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool -_ComplexLike_co: TypeAlias = complex | np.number | np.bool -_NumberLike_co: TypeAlias = _ComplexLike_co -_TD64Like_co: TypeAlias 
= int | np.timedelta64 | np.integer | np.bool +type _BoolLike_co = bool | np.bool +type _UIntLike_co = bool | np.unsignedinteger | np.bool +type _IntLike_co = int | np.integer | np.bool +type _FloatLike_co = float | np.floating | np.integer | np.bool +type _ComplexLike_co = complex | np.number | np.bool +type _NumberLike_co = _ComplexLike_co +type _TD64Like_co = int | np.timedelta64 | np.integer | np.bool # `_VoidLike_co` is technically not a scalar, but it's close enough -_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void -_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic +type _VoidLike_co = tuple[Any, ...] | np.void +type _ScalarLike_co = complex | str | bytes | np.generic diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py index e297aef2f554..132943b283c8 100644 --- a/numpy/_typing/_shape.py +++ b/numpy/_typing/_shape.py @@ -1,8 +1,8 @@ from collections.abc import Sequence -from typing import Any, SupportsIndex, TypeAlias +from typing import Any, SupportsIndex -_Shape: TypeAlias = tuple[int, ...] -_AnyShape: TypeAlias = tuple[Any, ...] +type _Shape = tuple[int, ...] +type _AnyShape = tuple[Any, ...] # Anything that can be coerced to a shape tuple -_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] +type _ShapeLike = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 9d3fa0e5335c..5ace7d90d00a 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,54 +4,40 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
-""" # noqa: PYI021 +""" from _typeshed import Incomplete from types import EllipsisType from typing import ( Any, - Generic, Literal, LiteralString, Never, NoReturn, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, + override, type_check_only, ) import numpy as np from numpy import _CastingKind, _OrderKACF, ufunc -from numpy.typing import NDArray -from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._array_like import ArrayLike, NDArray, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike from ._scalars import _ScalarLike_co from ._shape import _ShapeLike -_T = TypeVar("_T") -_2Tuple: TypeAlias = tuple[_T, _T] -_3Tuple: TypeAlias = tuple[_T, _T, _T] -_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] +type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] +type _4Tuple[T] = tuple[T, T, T, T] -_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] -_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] -_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] - -_NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", covariant=True) -_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) -_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) - -_NIn = TypeVar("_NIn", bound=int, covariant=True) -_NOut = TypeVar("_NOut", bound=int, covariant=True) -_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +type _2PTuple[T] = tuple[T, T, *tuple[T, ...]] +type _3PTuple[T] = tuple[T, T, T, *tuple[T, ...]] +type _4PTuple[T] = tuple[T, T, T, T, *tuple[T, ...]] @type_check_only class _SupportsArrayUFunc(Protocol): @@ -89,15 +75,15 @@ class _ReduceKwargs(TypedDict, total=False): # pyright: reportIncompatibleMethodOverride=false @type_check_only -class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout1[NameT: LiteralString, 
NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -158,15 +144,15 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -241,8 +227,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: np.ndarray | EllipsisType | None = None, ) -> NDArray[Incomplete]: ... 
- @overload # type: ignore[override] - def reduce( # out=None (default), keepdims=False (default) + @override # type: ignore[override] + @overload # out=None (default), keepdims=False (default) + def reduce( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -278,6 +265,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i **kwargs: Unpack[_ReduceKwargs], ) -> NDArray[Incomplete]: ... + @override def reduceat( self, array: ArrayLike, @@ -288,7 +276,8 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: np.ndarray | EllipsisType | None = None, ) -> NDArray[Incomplete]: ... - @overload # type: ignore[override] + @override # type: ignore[override] + @overload def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, @@ -344,6 +333,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Incomplete] | Incomplete: ... + @override def at( # type: ignore[override] self, a: np.ndarray | _SupportsArrayUFunc, @@ -353,15 +343,15 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i ) -> None: ... @type_check_only -class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... 
@property @@ -427,15 +417,15 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: Never, indices: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -487,15 +477,15 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] +class _GUFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT, SignatureT: LiteralString](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -503,7 +493,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def nargs(self) -> Literal[3]: ... 
@property - def signature(self) -> _Signature: ... + def signature(self) -> SignatureT: ... # Scalar for 1D array-likes; ndarray otherwise @overload @@ -580,9 +570,9 @@ class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): signature: str | _4PTuple[DTypeLike] @type_check_only -class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin1_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -601,7 +591,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -609,15 +599,15 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -635,9 +625,9 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... 
@property @@ -657,7 +647,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -666,16 +656,16 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -695,8 +685,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Incomplete: ... - @overload # type: ignore[override] - def accumulate( + @override # type: ignore[override] + @overload + def accumulate( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -705,28 +696,29 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload - def accumulate( + def accumulate[OutT: np.ndarray]( self, array: ArrayLike, /, axis: SupportsIndex = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: OutT, + ) -> OutT: ... - @overload # type: ignore[override] - def reduce( # out=array + @override # type: ignore[override] + @overload # out=array + def reduce[OutT: np.ndarray]( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, axis: _ShapeLike | None = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], keepdims: bool = False, **kwargs: Unpack[_ReduceKwargs], - ) -> _ArrayT: ... + ) -> OutT: ... @overload # out=... 
def reduce( self, @@ -761,10 +753,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: EllipsisType | None = None, keepdims: bool = False, **kwargs: Unpack[_ReduceKwargs], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... - @overload # type: ignore[override] - def reduceat( + @override # type: ignore[override] + @overload + def reduceat[OutT: np.ndarray]( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -772,8 +765,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno axis: SupportsIndex = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... + out: OutT | tuple[OutT], + ) -> OutT: ... @overload def reduceat( self, @@ -795,8 +788,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, ) -> Incomplete: ... - @overload # type: ignore[override] - def outer( + @override # type: ignore[override] + @overload + def outer( # pyrefly: ignore[bad-override] self, A: _ScalarLike_co, B: _ScalarLike_co, @@ -804,7 +798,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno *, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def outer( self, @@ -814,17 +808,17 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno *, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def outer( + def outer[OutT: np.ndarray]( self, A: ArrayLike, B: ArrayLike, /, *, - out: _ArrayT, + out: OutT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... 
@overload def outer( self, @@ -846,6 +840,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Incomplete: ... + @override def at( # type: ignore[override] self, a: np.ndarray | _SupportsArrayUFunc, @@ -855,11 +850,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno ) -> None: ... @type_check_only -class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] +class _PyFunc_Nin3P_Nout1[ReturnT, IdentT, NInT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property def nout(self) -> Literal[1]: ... @property @@ -877,7 +872,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ *xs: _ScalarLike_co, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -888,18 +883,18 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ *xs: ArrayLike, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, x3: ArrayLike, /, *xs: ArrayLike, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -919,13 +914,13 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... 
# type: ignore[override] @type_check_only -class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] +class _PyFunc_Nin1P_Nout2P[ReturnT, IdentT, NInT: int, NOutT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property - def nout(self) -> _NOut: ... + def nout(self) -> NOutT: ... @property def ntypes(self) -> Literal[1]: ... @property @@ -939,7 +934,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) *xs: _ScalarLike_co, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co]: ... + ) -> _2PTuple[ReturnT]: ... @overload def __call__( self, @@ -948,16 +943,16 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) *xs: ArrayLike, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + ) -> _2PTuple[ReturnT | NDArray[np.object_]]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, *xs: ArrayLike, - out: _2PTuple[_ArrayT], + out: _2PTuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ArrayT]: ... + ) -> _2PTuple[OutT]: ... 
@overload def __call__( self, diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 84ee99db1be8..a64482539b9a 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -11,7 +11,7 @@ import functools import warnings -from ._convertions import asbytes, asunicode +from ._conversions import asbytes, asunicode def set_module(module): diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index b630777ced99..f4c27cf2917f 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,14 +1,11 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable -from typing import Protocol, TypeVar, overload, type_check_only +from typing import Protocol, overload, type_check_only -from ._convertions import asbytes as asbytes, asunicode as asunicode +from ._conversions import asbytes as asbytes, asunicode as asunicode ### -_T = TypeVar("_T") -_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) - @type_check_only class _HasModule(Protocol): __module__: str @@ -18,11 +15,11 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... +def set_module[ModuleT: _HasModule](module: str) -> Callable[[ModuleT], ModuleT]: ... # -def _rename_parameter( +def _rename_parameter[T]( old_names: Iterable[str], new_names: Iterable[str], dep_version: str | None = None, -) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... +) -> Callable[[Callable[..., T]], Callable[..., T]]: ... 
diff --git a/numpy/_utils/_convertions.py b/numpy/_utils/_conversions.py similarity index 100% rename from numpy/_utils/_convertions.py rename to numpy/_utils/_conversions.py diff --git a/numpy/_utils/_convertions.pyi b/numpy/_utils/_conversions.pyi similarity index 100% rename from numpy/_utils/_convertions.pyi rename to numpy/_utils/_conversions.pyi diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index 40546d2f4497..dd738025b728 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,21 +1,18 @@ import types from _typeshed import SupportsLenAndGetItem from collections.abc import Callable, Mapping -from typing import Any, Final, TypeAlias, TypeVar, overload +from typing import Any, Final, overload from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] ### -_T = TypeVar("_T") -_RT = TypeVar("_RT") +type _StrSeq = SupportsLenAndGetItem[str] +type _NestedSeq[T] = list[T | _NestedSeq[T]] | tuple[T | _NestedSeq[T], ...] -_StrSeq: TypeAlias = SupportsLenAndGetItem[str] -_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] - -_JoinFunc: TypeAlias = Callable[[list[_T]], _T] -_FormatFunc: TypeAlias = Callable[[_T], str] +type _JoinFunc[T] = Callable[[list[T]], T] +type _FormatFunc[T] = Callable[[T], str] ### @@ -43,7 +40,7 @@ def joinseq(seq: _StrSeq) -> str: ... @overload def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... @overload -def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... +def strseq[VT, RT](object: _NestedSeq[VT], convert: Callable[[VT], RT], join: _JoinFunc[RT]) -> RT: ... 
# def formatargspec( diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 11ae02e57a59..593960274814 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -7,17 +7,15 @@ from typing import ( Generic, Literal as L, NamedTuple, - TypeVar, final, type_check_only, ) -from typing_extensions import TypeIs +from typing_extensions import TypeIs, TypeVar __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] ### -_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...]) _CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) ### @@ -71,7 +69,12 @@ class _BaseVersion(Generic[_CmpKeyT_co]): def __le__(self, other: _BaseVersion, /) -> bool: ... def __ge__(self, other: _BaseVersion, /) -> bool: ... def __gt__(self, other: _BaseVersion, /) -> bool: ... - def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ... + def _compare[CmpKeyT: tuple[object, ...]]( + self, + /, + other: _BaseVersion[CmpKeyT], + method: Callable[[_CmpKeyT_co, CmpKeyT], bool], + ) -> bool: ... class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]): _version: Final[str] diff --git a/numpy/char/__init__.py b/numpy/char/__init__.py index d98d38c1d6af..a757fcee58ac 100644 --- a/numpy/char/__init__.py +++ b/numpy/char/__init__.py @@ -1,2 +1,31 @@ -from numpy._core.defchararray import * from numpy._core.defchararray import __all__, __doc__ + +__DEPRECATED = frozenset({"chararray", "array", "asarray"}) + + +def __getattr__(name: str): + if name in __DEPRECATED: + # Deprecated in NumPy 2.5, 2026-01-07 + import warnings + + warnings.warn( + ( + "The chararray class is deprecated and will be removed in a future " + "release. Use an ndarray with a string or bytes dtype instead." 
+ ), + DeprecationWarning, + stacklevel=2, + ) + + import numpy._core.defchararray as char + + if (export := getattr(char, name, None)) is not None: + return export + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +def __dir__() -> list[str]: + import numpy._core.defchararray as char + + return dir(char) diff --git a/numpy/char/__init__.pyi b/numpy/char/__init__.pyi index e151f20e5f38..f53ff7e483b0 100644 --- a/numpy/char/__init__.pyi +++ b/numpy/char/__init__.pyi @@ -1,10 +1,10 @@ -from numpy._core.defchararray import ( +from numpy._core.defchararray import ( # type: ignore[deprecated] add, array, asarray, capitalize, center, - chararray, + chararray, # pyrefly: ignore[deprecated] compare_chararrays, count, decode, diff --git a/numpy/conftest.py b/numpy/conftest.py index c3c96ef3bc39..d116d6e65727 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -23,9 +23,9 @@ try: import pytest_run_parallel # noqa: F401 - PARALLEL_RUN_AVALIABLE = True + PARALLEL_RUN_AVAILABLE = True except ModuleNotFoundError: - PARALLEL_RUN_AVALIABLE = False + PARALLEL_RUN_AVAILABLE = False _old_fpu_mode = None _collect_results = {} @@ -66,9 +66,7 @@ def pytest_configure(config): "leaks_references: Tests that are known to leak references.") config.addinivalue_line("markers", "slow: Tests that are very slow.") - config.addinivalue_line("markers", - "slow_pypy: Tests that are very slow on pypy.") - if not PARALLEL_RUN_AVALIABLE: + if not PARALLEL_RUN_AVAILABLE: config.addinivalue_line("markers", "parallel_threads(n): run the given test function in parallel " "using `n` threads.", @@ -183,9 +181,12 @@ def warnings_errors_and_rng(test=None): "numpy.core", "Importing from numpy.matlib", "This function is deprecated.", # random_integers - "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross - "NumPy warning suppression and assertion utilities are deprecated." 
+ "NumPy warning suppression and assertion utilities are deprecated.", + "numpy.fix is deprecated", # fix -> trunc + "The chararray class is deprecated", # char.chararray + "numpy.typename is deprecated", # typename -> dtype.name + "numpy.ma.round_ is deprecated", # ma.round_ -> ma.round ] msg = "|".join(msgs) diff --git a/numpy/core/__init__.pyi b/numpy/core/__init__.pyi index e69de29bb2d1..cecacb907939 100644 --- a/numpy/core/__init__.pyi +++ b/numpy/core/__init__.pyi @@ -0,0 +1,45 @@ +# deprecated module + +from types import ModuleType + +from . import ( + _dtype, + _dtype_ctypes, + _internal, + arrayprint, + defchararray, + einsumfunc, + fromnumeric, + function_base, + getlimits, + multiarray, + numeric, + numerictypes, + overrides, + records, + shape_base, + umath, +) + +__all__ = [ + "_dtype", + "_dtype_ctypes", + "_internal", + "_multiarray_umath", + "arrayprint", + "defchararray", + "einsumfunc", + "fromnumeric", + "function_base", + "getlimits", + "multiarray", + "numeric", + "numerictypes", + "overrides", + "records", + "shape_base", + "umath", +] + +# `numpy._core._multiarray_umath` has no stubs, so there's nothing to re-export +_multiarray_umath: ModuleType diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi new file mode 100644 index 000000000000..449f01c97af7 --- /dev/null +++ b/numpy/core/_internal.pyi @@ -0,0 +1 @@ +# deprecated module diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi new file mode 100644 index 000000000000..c4e5c5e5cc44 --- /dev/null +++ b/numpy/core/arrayprint.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.arrayprint import * +from numpy._core.arrayprint import __all__ as __all__ diff --git a/numpy/core/defchararray.pyi b/numpy/core/defchararray.pyi new file mode 100644 index 000000000000..4a2f369c1f7d --- /dev/null +++ b/numpy/core/defchararray.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.defchararray import * +from numpy._core.defchararray import __all__ as __all__ diff 
--git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi new file mode 100644 index 000000000000..476c79bc2006 --- /dev/null +++ b/numpy/core/einsumfunc.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.einsumfunc import * +from numpy._core.einsumfunc import __all__ as __all__ diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi new file mode 100644 index 000000000000..8e5ac5b765f5 --- /dev/null +++ b/numpy/core/fromnumeric.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.fromnumeric import * +from numpy._core.fromnumeric import __all__ as __all__ diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi new file mode 100644 index 000000000000..fa041a9d3d60 --- /dev/null +++ b/numpy/core/function_base.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.function_base import * +from numpy._core.function_base import __all__ as __all__ diff --git a/numpy/core/getlimits.pyi b/numpy/core/getlimits.pyi new file mode 100644 index 000000000000..91a9dec49d42 --- /dev/null +++ b/numpy/core/getlimits.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.getlimits import * +from numpy._core.getlimits import __all__ as __all__ diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi new file mode 100644 index 000000000000..d58f20dcc4c8 --- /dev/null +++ b/numpy/core/multiarray.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.multiarray import * +from numpy._core.multiarray import __all__ as __all__ diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi new file mode 100644 index 000000000000..dbb936364c46 --- /dev/null +++ b/numpy/core/numeric.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.numeric import * +from numpy._core.numeric import __all__ as __all__ diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi new file mode 100644 index 000000000000..5251eae02b6a --- /dev/null +++ b/numpy/core/numerictypes.pyi @@ -0,0 +1,4 @@ +# deprecated module 
+ +from numpy._core.numerictypes import * +from numpy._core.numerictypes import __all__ as __all__ diff --git a/numpy/core/records.pyi b/numpy/core/records.pyi new file mode 100644 index 000000000000..f6672b47ba6a --- /dev/null +++ b/numpy/core/records.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.records import * +from numpy._core.records import __all__ as __all__ diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi new file mode 100644 index 000000000000..0d4d077d7e64 --- /dev/null +++ b/numpy/core/shape_base.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.shape_base import * +from numpy._core.shape_base import __all__ as __all__ diff --git a/numpy/core/umath.pyi b/numpy/core/umath.pyi new file mode 100644 index 000000000000..b32fc9b11d8f --- /dev/null +++ b/numpy/core/umath.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.umath import * +from numpy._core.umath import __all__ as __all__ diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index 9255603cd5d0..2ac905f166fc 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -193,7 +193,7 @@ def from_param(cls, obj): raise TypeError(f"array must have data type {cls._dtype_}") if cls._ndim_ is not None \ and obj.ndim != cls._ndim_: - raise TypeError("array must have %d dimension(s)" % cls._ndim_) + raise TypeError(f"array must have {cls._ndim_} dimension(s)") if cls._shape_ is not None \ and obj.shape != cls._shape_: raise TypeError(f"array must have shape {str(cls._shape_)}") @@ -333,7 +333,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): else: name = dtype.str if ndim is not None: - name += "_%dd" % ndim + name += f"_{ndim}d" if shape is not None: name += "_" + "x".join(str(x) for x in shape) if flags is not None: @@ -502,7 +502,7 @@ def as_ctypes_type(dtype): -------- Converting a simple dtype: - >>> dt = np.dtype('int8') + >>> dt = np.dtype(np.int8) >>> ctype = 
np.ctypeslib.as_ctypes_type(dt) >>> ctype diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 8881141f8ed5..3ab72549f472 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,31 +1,9 @@ -# NOTE: Numpy's mypy plugin is used for importing the correct -# platform-specific `ctypes._SimpleCData[int]` sub-type -import ctypes +import ctypes as ct from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence -from ctypes import c_int64 as _c_intp -from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload +from typing import Any, ClassVar, Literal as L, overload import numpy as np -from numpy import ( - byte, - double, - dtype, - generic, - intc, - long, - longdouble, - longlong, - ndarray, - short, - single, - ubyte, - uintc, - ulong, - ulonglong, - ushort, - void, -) from numpy._core._internal import _ctypes from numpy._core.multiarray import flagsobj from numpy._typing import ( @@ -34,32 +12,33 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _BoolCodes, - _ByteCodes, - _DoubleCodes, _DTypeLike, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, _IntCCodes, + _IntPCodes, _LongCodes, _LongDoubleCodes, _LongLongCodes, _ShapeLike, - _ShortCodes, - _SingleCodes, - _UByteCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, _UIntCCodes, + _UIntPCodes, _ULongCodes, _ULongLongCodes, - _UShortCodes, _VoidDTypeLike, ) __all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] -# TODO: Add a proper `_Shape` bound once we've got variadic typevars -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) -_ScalarT = TypeVar("_ScalarT", bound=generic) - -_FlagsKind: TypeAlias = L[ +type _FlagsKind = L[ "C_CONTIGUOUS", "CONTIGUOUS", "C", "F_CONTIGUOUS", "FORTRAN", "F", "ALIGNED", "A", @@ -68,32 +47,34 @@ _FlagsKind: TypeAlias = 
L[ "WRITEBACKIFCOPY", "X", ] -# TODO: Add a shape typevar once we have variadic typevars (PEP 646) -class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): +# TODO: Add a shape type parameter +class _ndptr[OptionalDTypeT: np.dtype | None](ct.c_void_p): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` - _dtype_: ClassVar[_DTypeOptionalT] - _shape_: ClassVar[_AnyShape | None] - _ndim_: ClassVar[int | None] - _flags_: ClassVar[list[_FlagsKind] | None] + _dtype_: OptionalDTypeT = ... + _shape_: ClassVar[_AnyShape | None] = ... + _ndim_: ClassVar[int | None] = ... + _flags_: ClassVar[list[_FlagsKind] | None] = ... @overload # type: ignore[override] @classmethod - def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... + def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... # pyrefly: ignore[bad-override] @overload @classmethod - def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ... + def from_param[DTypeT: np.dtype](cls: type[_ndptr[DTypeT]], obj: np.ndarray[Any, DTypeT]) -> _ctypes[Any]: ... # pyright: ignore[reportIncompatibleMethodOverride] + +class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): + _dtype_: DTypeT = ... + _shape_: ClassVar[_AnyShape] = ... # pyright: ignore[reportIncompatibleVariableOverride] -class _concrete_ndptr(_ndptr[_DTypeT]): - _dtype_: ClassVar[_DTypeT] - _shape_: ClassVar[_AnyShape] @property - def contents(self) -> ndarray[_AnyShape, _DTypeT]: ... + def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... -def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... +def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ct.CDLL: ... 
-c_intp = _c_intp +c_intp = ct.c_int64 # most platforms are 64-bit nowadays +# @overload def ndpointer( dtype: None = None, @@ -102,13 +83,13 @@ def ndpointer( flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[None]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], ndim: int | None = None, *, shape: _ShapeLike, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... +) -> type[_concrete_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike | None, @@ -116,121 +97,126 @@ def ndpointer( *, shape: _ShapeLike, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_concrete_ndptr[dtype]]: ... +) -> type[_concrete_ndptr[np.dtype]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_ndptr[dtype[_ScalarT]]]: ... +) -> type[_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike | None, ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_ndptr[dtype]]: ... +) -> type[_ndptr[np.dtype]]: ... -@overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... -@overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... -@overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... -@overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... 
-@overload -def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... -@overload -def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... -@overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... -@overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... -@overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... -@overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... -@overload -def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... -@overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... -@overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... -@overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... -@overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... -@overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` -@overload +# +@overload # bool +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ct.c_bool]) -> type[ct.c_bool]: ... +@overload # int8 +def as_ctypes_type(dtype: _Int8Codes | _DTypeLike[np.int8] | type[ct.c_int8]) -> type[ct.c_int8]: ... +@overload # int16 +def as_ctypes_type(dtype: _Int16Codes | _DTypeLike[np.int16] | type[ct.c_int16]) -> type[ct.c_int16]: ... +@overload # int32 +def as_ctypes_type(dtype: _Int32Codes | _DTypeLike[np.int32] | type[ct.c_int32]) -> type[ct.c_int32]: ... 
+@overload # int64 +def as_ctypes_type(dtype: _Int64Codes | _DTypeLike[np.int64] | type[ct.c_int64]) -> type[ct.c_int64]: ... +@overload # intc +def as_ctypes_type(dtype: _IntCCodes | type[ct.c_int]) -> type[ct.c_int]: ... +@overload # long +def as_ctypes_type(dtype: _LongCodes | type[ct.c_long]) -> type[ct.c_long]: ... +@overload # longlong +def as_ctypes_type(dtype: _LongLongCodes | type[ct.c_longlong]) -> type[ct.c_longlong]: ... +@overload # intp +def as_ctypes_type(dtype: _IntPCodes | type[ct.c_ssize_t] | type[int]) -> type[ct.c_ssize_t]: ... +@overload # uint8 +def as_ctypes_type(dtype: _UInt8Codes | _DTypeLike[np.uint8] | type[ct.c_uint8]) -> type[ct.c_uint8]: ... +@overload # uint16 +def as_ctypes_type(dtype: _UInt16Codes | _DTypeLike[np.uint16] | type[ct.c_uint16]) -> type[ct.c_uint16]: ... +@overload # uint32 +def as_ctypes_type(dtype: _UInt32Codes | _DTypeLike[np.uint32] | type[ct.c_uint32]) -> type[ct.c_uint32]: ... +@overload # uint64 +def as_ctypes_type(dtype: _UInt64Codes | _DTypeLike[np.uint64] | type[ct.c_uint64]) -> type[ct.c_uint64]: ... +@overload # uintc +def as_ctypes_type(dtype: _UIntCCodes | type[ct.c_uint]) -> type[ct.c_uint]: ... +@overload # ulong +def as_ctypes_type(dtype: _ULongCodes | type[ct.c_ulong]) -> type[ct.c_ulong]: ... +@overload # ulonglong +def as_ctypes_type(dtype: _ULongLongCodes | type[ct.c_ulonglong]) -> type[ct.c_ulonglong]: ... +@overload # uintp +def as_ctypes_type(dtype: _UIntPCodes | type[ct.c_size_t]) -> type[ct.c_size_t]: ... +@overload # float32 +def as_ctypes_type(dtype: _Float32Codes | _DTypeLike[np.float32] | type[ct.c_float]) -> type[ct.c_float]: ... +@overload # float64 +def as_ctypes_type(dtype: _Float64Codes | _DTypeLike[np.float64] | type[float | ct.c_double]) -> type[ct.c_double]: ... +@overload # longdouble +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[np.longdouble] | type[ct.c_longdouble]) -> type[ct.c_longdouble]: ... 
+@overload # void +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ct.Union` or `ct.Structure` +@overload # fallback def as_ctypes_type(dtype: str) -> type[Any]: ... +# @overload -def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +def as_array(obj: ct._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload -def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... -@overload -def as_ctypes(obj: byte) -> ctypes.c_byte: ... -@overload -def as_ctypes(obj: short) -> ctypes.c_short: ... -@overload -def as_ctypes(obj: intc) -> ctypes.c_int: ... -@overload -def as_ctypes(obj: long) -> ctypes.c_long: ... +def as_ctypes(obj: np.bool) -> ct.c_bool: ... @overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: np.int8) -> ct.c_int8: ... @overload -def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... +def as_ctypes(obj: np.int16) -> ct.c_int16: ... @overload -def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +def as_ctypes(obj: np.int32) -> ct.c_int32: ... @overload -def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +def as_ctypes(obj: np.int64) -> ct.c_int64: ... @overload -def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... +def as_ctypes(obj: np.uint8) -> ct.c_uint8: ... @overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: np.uint16) -> ct.c_uint16: ... @overload -def as_ctypes(obj: single) -> ctypes.c_float: ... +def as_ctypes(obj: np.uint32) -> ct.c_uint32: ... @overload -def as_ctypes(obj: double) -> ctypes.c_double: ... +def as_ctypes(obj: np.uint64) -> ct.c_uint64: ... 
@overload -def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +def as_ctypes(obj: np.float32) -> ct.c_float: ... @overload -def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: np.float64) -> ct.c_double: ... @overload -def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... +def as_ctypes(obj: np.longdouble) -> ct.c_longdouble: ... @overload -def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +def as_ctypes(obj: np.void) -> Any: ... # `ct.Union` or `ct.Structure` @overload -def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +def as_ctypes(obj: NDArray[np.bool]) -> ct.Array[ct.c_bool]: ... @overload -def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +def as_ctypes(obj: NDArray[np.int8]) -> ct.Array[ct.c_int8]: ... @overload -def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... +def as_ctypes(obj: NDArray[np.int16]) -> ct.Array[ct.c_int16]: ... @overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: NDArray[np.int32]) -> ct.Array[ct.c_int32]: ... @overload -def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +def as_ctypes(obj: NDArray[np.int64]) -> ct.Array[ct.c_int64]: ... @overload -def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +def as_ctypes(obj: NDArray[np.uint8]) -> ct.Array[ct.c_uint8]: ... @overload -def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +def as_ctypes(obj: NDArray[np.uint16]) -> ct.Array[ct.c_uint16]: ... @overload -def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... +def as_ctypes(obj: NDArray[np.uint32]) -> ct.Array[ct.c_uint32]: ... @overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... 
# type: ignore[overload-cannot-match] +def as_ctypes(obj: NDArray[np.uint64]) -> ct.Array[ct.c_uint64]: ... @overload -def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +def as_ctypes(obj: NDArray[np.float32]) -> ct.Array[ct.c_float]: ... @overload -def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +def as_ctypes(obj: NDArray[np.float64]) -> ct.Array[ct.c_double]: ... @overload -def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +def as_ctypes(obj: NDArray[np.longdouble]) -> ct.Array[ct.c_longdouble]: ... @overload -def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: NDArray[np.void]) -> ct.Array[Any]: ... # `ct.Union` or `ct.Structure` diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py deleted file mode 100644 index f74ed4d3f6db..000000000000 --- a/numpy/distutils/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. - -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. - -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -import warnings - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -warnings.warn("\n\n" - " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" - " of the deprecation of `distutils` itself. It will be removed for\n" - " Python >= 3.12. 
For older Python versions it will remain present.\n" - " It is recommended to use `setuptools < 60.0` for those Python versions.\n" - " For more details, see:\n" - " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", - DeprecationWarning, stacklevel=2 -) -del warnings - -# If numpy is installed, add distutils.test() -try: - from . import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi deleted file mode 100644 index 3938d68de14c..000000000000 --- a/numpy/distutils/__init__.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Any - -# TODO: remove when the full numpy namespace is defined -def __getattr__(name: str) -> Any: ... diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py deleted file mode 100644 index 9a1c8ce718c9..000000000000 --- a/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. -""" -import os -import shlex -import subprocess - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. 
- - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. - """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. - """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
- """ - @staticmethod - def join(argv): - return ' '.join(shlex.quote(arg) for arg in argv) - - @staticmethod - def split(cmd): - return shlex.split(cmd, posix=True) - - -if os.name == 'nt': - NativeParser = WindowsParser -elif os.name == 'posix': - NativeParser = PosixParser diff --git a/numpy/distutils/armccompiler.py b/numpy/distutils/armccompiler.py deleted file mode 100644 index afba7eb3b352..000000000000 --- a/numpy/distutils/armccompiler.py +++ /dev/null @@ -1,26 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class ArmCCompiler(UnixCCompiler): - - """ - Arm compiler. - """ - - compiler_type = 'arm' - cc_exe = 'armclang' - cxx_exe = 'armclang++' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler + - ' -O3 -fPIC', - compiler_so=cc_compiler + - ' -O3 -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -fPIC', - linker_exe=cc_compiler + - ' -lamath', - linker_so=cc_compiler + - ' -lamath -shared') diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py deleted file mode 100644 index dee13b1c9e84..000000000000 --- a/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,826 +0,0 @@ -import os -import re -import sys -import platform -import shlex -import time -import subprocess -from copy import copy -from pathlib import Path -from distutils import ccompiler -from distutils.ccompiler import ( - compiler_class, gen_lib_options, get_default_compiler, new_compiler, - CCompiler -) -from distutils.errors import ( - DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, - CompileError, UnknownFileError -) -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import ( - filepath_from_subprocess_output, forward_bytes_to_stdout -) -from numpy.distutils.misc_util import 
cyg2win32, is_sequence, mingw32, \ - get_num_build_jobs, \ - _commandline_dep_string, \ - sanitize_cxx_flags - -# globals for parallel build management -import threading - -_job_semaphore = None -_global_lock = threading.Lock() -_processing_files = set() - - -def _needs_build(obj, cc_args, extra_postargs, pp_opts): - """ - Check if an objects needs to be rebuild based on its dependencies - - Parameters - ---------- - obj : str - object file - - Returns - ------- - bool - """ - # defined in unixcompiler.py - dep_file = obj + '.d' - if not os.path.exists(dep_file): - return True - - # dep_file is a makefile containing 'object: dependencies' - # formatted like posix shell (spaces escaped, \ line continuations) - # the last line contains the compiler commandline arguments as some - # projects may compile an extension multiple times with different - # arguments - with open(dep_file) as f: - lines = f.readlines() - - cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) - last_cmdline = lines[-1] - if last_cmdline != cmdline: - return True - - contents = ''.join(lines[:-1]) - deps = [x for x in shlex.split(contents, posix=True) - if x != "\n" and not x.endswith(":")] - - try: - t_obj = os.stat(obj).st_mtime - - # check if any of the dependencies is newer than the object - # the dependencies includes the source used to create the object - for f in deps: - if os.stat(f).st_mtime > t_obj: - return True - except OSError: - # no object counts as newer (shouldn't happen if dep_file exists) - return True - - return False - - -def replace_method(klass, method_name, func): - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - - -###################################################################### -## Method that subclasses may redefine. But don't call this method, -## it i private to CCompiler class and may return unexpected -## results if used elsewhere. 
So, you have been warned.. - -def CCompiler_find_executables(self): - """ - Does nothing here, but is called by the get_version method and can be - overridden by subclasses. In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None, env=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - env : a dictionary for environment variables, optional - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - env = env if env is not None else dict(os.environ) - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd, env=env) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - # o = b'' - # still that would make the end-user lost in translation! 
- o = f"\n\n{e}\n\n\n" - try: - o = o.encode(sys.stdout.encoding) - except AttributeError: - o = o.encode('utf8') - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. 
- i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. 
- - """ - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - from numpy.distutils.fcompiler import (FCompiler, - FORTRAN_COMMON_FIXED_EXTENSIONS, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - 
f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(jobs) as pool: - res = pool.map(single_compile, build_items) - list(res) # access result to raise errors - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from ``distutils.cmd.Command``. - ignore : sequence of str, optional - List of ``distutils.ccompiler.CCompiler`` commands (without ``'set_'``) that should not be - altered. Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - - if ( - hasattr(self, 'compiler') and - 'clang' in self.compiler[0] and - not (platform.machine() == 'arm64' and sys.platform == 'darwin') - ): - # clang defaults to a non-strict floating error point model. 
- # However, '-ftrapping-math' is not currently supported (2023-04-08) - # for macosx_arm64. - # Since NumPy and most Python libs give warnings for these, override: - self.compiler.append('-ftrapping-math') - self.compiler_so.append('-ftrapping-math') - - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. 
- - """ - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls ``distutils.sysconfig.customize_compiler`` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. 
- try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. 
- - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a ``distutils.ccompiler.CCompiler`` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of ``distutils.version.LooseVersion``. 
- - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - try: - output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - output = exc.output - status = exc.returncode - except OSError: - # match the historical returns for a parent - # exception class caught by exec_command() - status = 127 - output = b'' - else: - # output isn't actually a filepath but we do this - # for now to match previous distutils behavior - output = filepath_from_subprocess_output(output) - status = 0 - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a ``distutils.ccompiler.CCompiler`` instance. 
- - """ - if self.compiler_type in ('msvc', 'intelw', 'intelemw'): - return self - - cxx = copy(self) - cxx.compiler_cxx = cxx.compiler_cxx - cxx.compiler_so = [cxx.compiler_cxx[0]] + \ - sanitize_cxx_flags(cxx.compiler_so[1:]) - if (sys.platform.startswith(('aix', 'os400')) and - 'ld_so_aix' in cxx.linker_so[0]): - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - if sys.platform.startswith('os400'): - #This is required by i 7.4 and prievous for PRId64 in printf() call. - cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') - #This a bug of gcc10.3, which failed to handle the TLS init. - cxx.compiler_so.append('-fno-extern-tls-init') - cxx.linker_so.append('-fno-extern-tls-init') - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', - "Arm C Compiler") -compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', - "Fujitsu C Compiler") - -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 
'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError as e: - msg = str(e) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError as e: - msg = str(e) - raise DistutilsModuleError("can't compile C/C++ code: unable to load " - "module '%s'" % module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find " - "class '%s' in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' 
+ _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py deleted file mode 100644 index 4dea2f9b1da1..000000000000 --- a/numpy/distutils/ccompiler_opt.py +++ /dev/null @@ -1,2668 +0,0 @@ -"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware -optimization, starting from parsing the command arguments, to managing the -relation between the CPU baseline and dispatch-able features, -also generating the required C headers and ending with compiling -the sources with proper compiler's flags. - -`CCompilerOpt` doesn't provide runtime detection for the CPU features, -instead only focuses on the compiler side, but it creates abstract C headers -that can be used later for the final runtime dispatching process.""" - -import atexit -import inspect -import os -import pprint -import re -import subprocess -import textwrap - -class _Config: - """An abstract class holds all configurable attributes of `CCompilerOpt`, - these class attributes can be used to change the default behavior - of `CCompilerOpt` in order to fit other requirements. - - Attributes - ---------- - conf_nocache : bool - Set True to disable memory and file cache. - Default is False. - - conf_noopt : bool - Set True to forces the optimization to be disabled, - in this case `CCompilerOpt` tends to generate all - expected headers in order to 'not' break the build. - Default is False. - - conf_cache_factors : list - Add extra factors to the primary caching factors. The caching factors - are utilized to determine if there are changes had happened that - requires to discard the cache and re-updating it. The primary factors - are the arguments of `CCompilerOpt` and `CCompiler`'s properties(type, flags, etc). 
- Default is list of two items, containing the time of last modification - of `ccompiler_opt` and value of attribute "conf_noopt" - - conf_tmp_path : str, - The path of temporary directory. Default is auto-created - temporary directory via ``tempfile.mkdtemp()``. - - conf_check_path : str - The path of testing files. Each added CPU feature must have a - **C** source file contains at least one intrinsic or instruction that - related to this feature, so it can be tested against the compiler. - Default is ``./distutils/checks``. - - conf_target_groups : dict - Extra tokens that can be reached from dispatch-able sources through - the special mark ``@targets``. Default is an empty dictionary. - - **Notes**: - - case-insensitive for tokens and group names - - sign '#' must stick in the begin of group name and only within ``@targets`` - - **Example**: - .. code-block:: console - - $ "@targets #avx_group other_tokens" > group_inside.c - - >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ - "$werror $maxopt avx2 avx512f avx512_skx" - >>> cco = CCompilerOpt(cc_instance) - >>> cco.try_dispatch(["group_inside.c"]) - - conf_c_prefix : str - The prefix of public C definitions. Default is ``"NPY_"``. - - conf_c_prefix_ : str - The prefix of internal C definitions. Default is ``"NPY__"``. - - conf_cc_flags : dict - Nested dictionaries defining several compiler flags - that linked to some major functions, the main key - represent the compiler name and sub-keys represent - flags names. Default is already covers all supported - **C** compilers. - - Sub-keys explained as follows: - - "native": str or None - used by argument option `native`, to detect the current - machine support via the compiler. - "werror": str or None - utilized to treat warning as errors during testing CPU features - against the compiler and also for target's policy `$werror` - via dispatch-able sources. 
- "maxopt": str or None - utilized for target's policy '$maxopt' and the value should - contains the maximum acceptable optimization by the compiler. - e.g. in gcc ``'-O3'`` - - **Notes**: - * case-sensitive for compiler names and flags - * use space to separate multiple flags - * any flag will tested against the compiler and it will skipped - if it's not applicable. - - conf_min_features : dict - A dictionary defines the used CPU features for - argument option ``'min'``, the key represent the CPU architecture - name e.g. ``'x86'``. Default values provide the best effort - on wide range of users platforms. - - **Note**: case-sensitive for architecture names. - - conf_features : dict - Nested dictionaries used for identifying the CPU features. - the primary key is represented as a feature name or group name - that gathers several features. Default values covers all - supported features but without the major options like "flags", - these undefined options handle it by method `conf_features_partial()`. - Default value is covers almost all CPU features for *X86*, *IBM/Power64* - and *ARM 7/8*. - - Sub-keys explained as follows: - - "implies" : str or list, optional, - List of CPU feature names to be implied by it, - the feature name must be defined within `conf_features`. - Default is None. - - "flags": str or list, optional - List of compiler flags. Default is None. - - "detect": str or list, optional - List of CPU feature names that required to be detected - in runtime. By default, its the feature name or features - in "group" if its specified. - - "implies_detect": bool, optional - If True, all "detect" of implied features will be combined. - Default is True. see `feature_detect()`. - - "group": str or list, optional - Same as "implies" but doesn't require the feature name to be - defined within `conf_features`. 
- - "interest": int, required - a key for sorting CPU features - - "headers": str or list, optional - intrinsics C header file - - "disable": str, optional - force disable feature, the string value should contains the - reason of disabling. - - "autovec": bool or None, optional - True or False to declare that CPU feature can be auto-vectorized - by the compiler. - By default(None), treated as True if the feature contains at - least one applicable flag. see `feature_can_autovec()` - - "extra_checks": str or list, optional - Extra test case names for the CPU feature that need to be tested - against the compiler. - - Each test case must have a C file named ``extra_xxxx.c``, where - ``xxxx`` is the case name in lower case, under 'conf_check_path'. - It should contain at least one intrinsic or function related to the test case. - - If the compiler able to successfully compile the C file then `CCompilerOpt` - will add a C ``#define`` for it into the main dispatch header, e.g. - ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. - - **NOTES**: - * space can be used as separator with options that supports "str or list" - * case-sensitive for all values and feature name must be in upper-case. 
- * if flags aren't applicable, its will skipped rather than disable the - CPU feature - * the CPU feature will disabled if the compiler fail to compile - the test file - """ - conf_nocache = False - conf_noopt = False - conf_cache_factors = None - conf_tmp_path = None - conf_check_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "checks" - ) - conf_target_groups = {} - conf_c_prefix = 'NPY_' - conf_c_prefix_ = 'NPY__' - conf_cc_flags = dict( - gcc = dict( - # native should always fail on arm and ppc64, - # native usually works only with x86 - native = '-march=native', - opt = '-O3', - werror = '-Werror', - ), - clang = dict( - native = '-march=native', - opt = "-O3", - # One of the following flags needs to be applicable for Clang to - # guarantee the sanity of the testing process, however in certain - # cases `-Werror` gets skipped during the availability test due to - # "unused arguments" warnings. - # see https://github.com/numpy/numpy/issues/19624 - werror = '-Werror=switch -Werror', - ), - icc = dict( - native = '-xHost', - opt = '-O3', - werror = '-Werror', - ), - iccw = dict( - native = '/QxHost', - opt = '/O3', - werror = '/Werror', - ), - msvc = dict( - native = None, - opt = '/O2', - werror = '/WX', - ), - fcc = dict( - native = '-mcpu=a64fx', - opt = None, - werror = None, - ) - ) - conf_min_features = dict( - x86 = "SSE SSE2", - x64 = "SSE SSE2 SSE3", - ppc64 = '', # play it safe - ppc64le = "VSX VSX2", - s390x = '', - armhf = '', # play it safe - aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" - ) - conf_features = dict( - # X86 - SSE = dict( - interest=1, headers="xmmintrin.h", - # enabling SSE without SSE2 is useless also - # it's non-optional for x86_64 - implies="SSE2" - ), - SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), - SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), - SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), - SSE41 = dict(interest=5, implies="SSSE3", 
headers="smmintrin.h"), - POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), - SSE42 = dict(interest=7, implies="POPCNT"), - AVX = dict( - interest=8, implies="SSE42", headers="immintrin.h", - implies_detect=False - ), - XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), - FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), - F16C = dict(interest=11, implies="AVX"), - FMA3 = dict(interest=12, implies="F16C"), - AVX2 = dict(interest=13, implies="F16C"), - AVX512F = dict( - interest=20, implies="FMA3 AVX2", implies_detect=False, - extra_checks="AVX512F_REDUCE" - ), - AVX512CD = dict(interest=21, implies="AVX512F"), - AVX512_KNL = dict( - interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", - detect="AVX512_KNL", implies_detect=False - ), - AVX512_KNM = dict( - interest=41, implies="AVX512_KNL", - group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", - detect="AVX512_KNM", implies_detect=False - ), - AVX512_SKX = dict( - interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", - detect="AVX512_SKX", implies_detect=False, - extra_checks="AVX512BW_MASK AVX512DQ_MASK" - ), - AVX512_CLX = dict( - interest=43, implies="AVX512_SKX", group="AVX512VNNI", - detect="AVX512_CLX" - ), - AVX512_CNL = dict( - interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", - detect="AVX512_CNL", implies_detect=False - ), - AVX512_ICL = dict( - interest=45, implies="AVX512_CLX AVX512_CNL", - group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", - detect="AVX512_ICL", implies_detect=False - ), - AVX512_SPR = dict( - interest=46, implies="AVX512_ICL", group="AVX512FP16", - detect="AVX512_SPR", implies_detect=False - ), - # IBM/Power - ## Power7/ISA 2.06 - VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), - ## Power8/ISA 2.07 - VSX2 = dict(interest=2, implies="VSX", implies_detect=False), - ## Power9/ISA 3.00 - VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, - extra_checks="VSX3_HALF_DOUBLE"), - ## 
Power10/ISA 3.1 - VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, - extra_checks="VSX4_MMA"), - # IBM/Z - ## VX(z13) support - VX = dict(interest=1, headers="vecintrin.h"), - ## Vector-Enhancements Facility - VXE = dict(interest=2, implies="VX", implies_detect=False), - ## Vector-Enhancements Facility 2 - VXE2 = dict(interest=3, implies="VXE", implies_detect=False), - # ARM - NEON = dict(interest=1, headers="arm_neon.h"), - NEON_FP16 = dict(interest=2, implies="NEON"), - ## FMA - NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"), - ## Advanced SIMD - ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), - ## ARMv8.2 half-precision & vector arithm - ASIMDHP = dict(interest=5, implies="ASIMD"), - ## ARMv8.2 dot product - ASIMDDP = dict(interest=6, implies="ASIMD"), - ## ARMv8.2 Single & half-precision Multiply - ASIMDFHM = dict(interest=7, implies="ASIMDHP") - ) - def conf_features_partial(self): - """Return a dictionary of supported CPU features by the platform, - and accumulate the rest of undefined options in `conf_features`, - the returned dict has same rules and notes in - class attribute `conf_features`, also its override - any options that been set in 'conf_features'. 
- """ - if self.cc_noopt: - # optimization is disabled - return {} - - on_x86 = self.cc_on_x86 or self.cc_on_x64 - is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc - - if on_x86 and is_unix: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = dict(flags="-mpopcnt"), - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = dict(flags="-mf16c"), - XOP = dict(flags="-mxop"), - FMA4 = dict(flags="-mfma4"), - FMA3 = dict(flags="-mfma"), - AVX2 = dict(flags="-mavx2"), - AVX512F = dict(flags="-mavx512f -mno-mmx"), - AVX512CD = dict(flags="-mavx512cd"), - AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), - AVX512_KNM = dict( - flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" - ), - AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), - AVX512_CLX = dict(flags="-mavx512vnni"), - AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), - AVX512_ICL = dict( - flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" - ), - AVX512_SPR = dict(flags="-mavx512fp16"), - ) - if on_x86 and self.cc_is_icc: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = {}, - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support AVX2 or FMA3 independently - FMA3 = dict( - implies="F16C AVX2", flags="-march=core-avx2" - ), - AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="-march=common-avx512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="-march=common-avx512" - ), - AVX512_KNL = 
dict(flags="-xKNL"), - AVX512_KNM = dict(flags="-xKNM"), - AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), - AVX512_CLX = dict(flags="-xCASCADELAKE"), - AVX512_CNL = dict(flags="-xCANNONLAKE"), - AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_iccw: return dict( - SSE = dict(flags="/arch:SSE"), - SSE2 = dict(flags="/arch:SSE2"), - SSE3 = dict(flags="/arch:SSE3"), - SSSE3 = dict(flags="/arch:SSSE3"), - SSE41 = dict(flags="/arch:SSE4.1"), - POPCNT = {}, - SSE42 = dict(flags="/arch:SSE4.2"), - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:CORE-AVX2" - ), - AVX2 = dict( - implies="FMA3", flags="/arch:CORE-AVX2" - ), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" - ), - AVX512_KNL = dict(flags="/Qx:KNL"), - AVX512_KNM = dict(flags="/Qx:KNM"), - AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), - AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), - AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), - AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_msvc: return dict( - SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, - SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, - SSE3 = {}, - SSSE3 = {}, - SSE41 = {}, - POPCNT = dict(headers="nmmintrin.h"), - SSE42 = {}, - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(headers="ammintrin.h"), - FMA4 = dict(headers="ammintrin.h"), - # MSVC doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:AVX2" - ), - AVX2 = dict( - implies="F16C FMA3", 
flags="/arch:AVX2" - ), - # MSVC doesn't support AVX512F or AVX512CD independently, - # always generate instructions belong to (VL/VW/DQ) - AVX512F = dict( - implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" - ), - AVX512CD = dict( - implies="AVX512F AVX512_SKX", flags="/arch:AVX512" - ), - AVX512_KNL = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_KNM = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_SKX = dict(flags="/arch:AVX512"), - AVX512_CLX = {}, - AVX512_CNL = {}, - AVX512_ICL = {}, - AVX512_SPR= dict( - disable="MSVC compiler doesn't support it" - ) - ) - - on_power = self.cc_on_ppc64le or self.cc_on_ppc64 - if on_power: - partial = dict( - VSX = dict( - implies=("VSX2" if self.cc_on_ppc64le else ""), - flags="-mvsx" - ), - VSX2 = dict( - flags="-mcpu=power8", implies_detect=False - ), - VSX3 = dict( - flags="-mcpu=power9 -mtune=power9", implies_detect=False - ), - VSX4 = dict( - flags="-mcpu=power10 -mtune=power10", implies_detect=False - ) - ) - if self.cc_is_clang: - partial["VSX"]["flags"] = "-maltivec -mvsx" - partial["VSX2"]["flags"] = "-mcpu=power8" - partial["VSX3"]["flags"] = "-mcpu=power9" - partial["VSX4"]["flags"] = "-mcpu=power10" - - return partial - - on_zarch = self.cc_on_s390x - if on_zarch: - partial = dict( - VX = dict( - flags="-march=arch11 -mzvector" - ), - VXE = dict( - flags="-march=arch12", implies_detect=False - ), - VXE2 = dict( - flags="-march=arch13", implies_detect=False - ) - ) - - return partial - - - if self.cc_on_aarch64 and is_unix: return dict( - NEON = dict( - implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True - ), - NEON_FP16 = dict( - implies="NEON NEON_VFPV4 ASIMD", autovec=True - ), - NEON_VFPV4 = dict( - implies="NEON NEON_FP16 ASIMD", autovec=True - ), - ASIMD = dict( - implies="NEON NEON_FP16 NEON_VFPV4", autovec=True - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod" - ), - ASIMDFHM = dict( - 
flags="-march=armv8.2-a+fp16fml" - ), - ) - if self.cc_on_armhf and is_unix: return dict( - NEON = dict( - flags="-mfpu=neon" - ), - NEON_FP16 = dict( - flags="-mfpu=neon-fp16 -mfp16-format=ieee" - ), - NEON_VFPV4 = dict( - flags="-mfpu=neon-vfpv4", - ), - ASIMD = dict( - flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod", - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ) - ) - # TODO: ARM MSVC - return {} - - def __init__(self): - if self.conf_tmp_path is None: - import shutil - import tempfile - tmp = tempfile.mkdtemp() - def rm_temp(): - try: - shutil.rmtree(tmp) - except OSError: - pass - atexit.register(rm_temp) - self.conf_tmp_path = tmp - - if self.conf_cache_factors is None: - self.conf_cache_factors = [ - os.path.getmtime(__file__), - self.conf_nocache - ] - -class _Distutils: - """A helper class that provides a collection of fundamental methods - implemented in a top of Python and NumPy Distutils. - - The idea behind this class is to gather all methods that it may - need to override in case of reuse 'CCompilerOpt' in environment - different than of what NumPy has. - - Parameters - ---------- - ccompiler : `CCompiler` - The generate instance that returned from `distutils.ccompiler.new_compiler()`. - """ - def __init__(self, ccompiler): - self._ccompiler = ccompiler - - def dist_compile(self, sources, flags, ccompiler=None, **kwargs): - """Wrap CCompiler.compile()""" - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - flags = kwargs.pop("extra_postargs", []) + flags - if not ccompiler: - ccompiler = self._ccompiler - - return ccompiler.compile(sources, extra_postargs=flags, **kwargs) - - def dist_test(self, source, flags, macros=[]): - """Return True if 'CCompiler.compile()' able to compile - a source file with certain flags. 
- """ - assert(isinstance(source, str)) - from distutils.errors import CompileError - cc = self._ccompiler; - bk_spawn = getattr(cc, 'spawn', None) - if bk_spawn: - cc_type = getattr(self._ccompiler, "compiler_type", "") - if cc_type in ("msvc",): - setattr(cc, 'spawn', self._dist_test_spawn_paths) - else: - setattr(cc, 'spawn', self._dist_test_spawn) - test = False - try: - self.dist_compile( - [source], flags, macros=macros, output_dir=self.conf_tmp_path - ) - test = True - except CompileError as e: - self.dist_log(str(e), stderr=True) - if bk_spawn: - setattr(cc, 'spawn', bk_spawn) - return test - - def dist_info(self): - """ - Return a tuple containing info about (platform, compiler, extra_args), - required by the abstract class '_CCompiler' for discovering the - platform environment. This is also used as a cache factor in order - to detect any changes happening from outside. - """ - if hasattr(self, "_dist_info"): - return self._dist_info - - cc_type = getattr(self._ccompiler, "compiler_type", '') - if cc_type in ("intelem", "intelemw"): - platform = "x86_64" - elif cc_type in ("intel", "intelw", "intele"): - platform = "x86" - else: - from distutils.util import get_platform - platform = get_platform() - - cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) - if not cc_type or cc_type == "unix": - if hasattr(cc_info, "__iter__"): - compiler = cc_info[0] - else: - compiler = str(cc_info) - else: - compiler = cc_type - - if hasattr(cc_info, "__iter__") and len(cc_info) > 1: - extra_args = ' '.join(cc_info[1:]) - else: - extra_args = os.environ.get("CFLAGS", "") - extra_args += os.environ.get("CPPFLAGS", "") - - self._dist_info = (platform, compiler, extra_args) - return self._dist_info - - @staticmethod - def dist_error(*args): - """Raise a compiler error""" - from distutils.errors import CompileError - raise CompileError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_fatal(*args): - """Raise a distutils 
error""" - from distutils.errors import DistutilsError - raise DistutilsError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_log(*args, stderr=False): - """Print a console message""" - from numpy.distutils import log - out = _Distutils._dist_str(*args) - if stderr: - log.warn(out) - else: - log.info(out) - - @staticmethod - def dist_load_module(name, path): - """Load a module from file, required by the abstract class '_Cache'.""" - from .misc_util import exec_mod_from_location - try: - return exec_mod_from_location(name, path) - except Exception as e: - _Distutils.dist_log(e, stderr=True) - return None - - @staticmethod - def _dist_str(*args): - """Return a string to print by log and errors.""" - def to_str(arg): - if not isinstance(arg, str) and hasattr(arg, '__iter__'): - ret = [] - for a in arg: - ret.append(to_str(a)) - return '('+ ' '.join(ret) + ')' - return str(arg) - - stack = inspect.stack()[2] - start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) - out = ' '.join([ - to_str(a) - for a in (*args,) - ]) - return start + out - - def _dist_test_spawn_paths(self, cmd, display=None): - """ - Fix msvc SDK ENV path same as distutils do - without it we get c1: fatal error C1356: unable to find mspdbcore.dll - """ - if not hasattr(self._ccompiler, "_paths"): - self._dist_test_spawn(cmd) - return - old_path = os.getenv("path") - try: - os.environ["path"] = self._ccompiler._paths - self._dist_test_spawn(cmd) - finally: - os.environ["path"] = old_path - - _dist_warn_regex = re.compile( - # intel and msvc compilers don't raise - # fatal errors when flags are wrong or unsupported - ".*(" - "warning D9002|" # msvc, it should be work with any language. 
- "invalid argument for option" # intel - ").*" - ) - @staticmethod - def _dist_test_spawn(cmd, display=None): - try: - o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, - text=True) - if o and re.match(_Distutils._dist_warn_regex, o): - _Distutils.dist_error( - "Flags in command", cmd ,"aren't supported by the compiler" - ", output -> \n%s" % o - ) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - o = e - s = 127 - else: - return None - _Distutils.dist_error( - "Command", cmd, "failed with exit status %d output -> \n%s" % ( - s, o - )) - -_share_cache = {} -class _Cache: - """An abstract class handles caching functionality, provides two - levels of caching, in-memory by share instances attributes among - each other and by store attributes into files. - - **Note**: - any attributes that start with ``_`` or ``conf_`` will be ignored. - - Parameters - ---------- - cache_path : str or None - The path of cache file, if None then cache in file will disabled. - - *factors : - The caching factors that need to utilize next to `conf_cache_factors`. - - Attributes - ---------- - cache_private : set - Hold the attributes that need be skipped from "in-memory cache". - - cache_infile : bool - Utilized during initializing this class, to determine if the cache was able - to loaded from the specified cache path in 'cache_path'. 
- """ - - # skip attributes from cache - _cache_ignore = re.compile("^(_|conf_)") - - def __init__(self, cache_path=None, *factors): - self.cache_me = {} - self.cache_private = set() - self.cache_infile = False - self._cache_path = None - - if self.conf_nocache: - self.dist_log("cache is disabled by `Config`") - return - - self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) - self._cache_path = cache_path - if cache_path: - if os.path.exists(cache_path): - self.dist_log("load cache from file ->", cache_path) - cache_mod = self.dist_load_module("cache", cache_path) - if not cache_mod: - self.dist_log( - "unable to load the cache file as a module", - stderr=True - ) - elif not hasattr(cache_mod, "hash") or \ - not hasattr(cache_mod, "data"): - self.dist_log("invalid cache file", stderr=True) - elif self._cache_hash == cache_mod.hash: - self.dist_log("hit the file cache") - for attr, val in cache_mod.data.items(): - setattr(self, attr, val) - self.cache_infile = True - else: - self.dist_log("miss the file cache") - - if not self.cache_infile: - other_cache = _share_cache.get(self._cache_hash) - if other_cache: - self.dist_log("hit the memory cache") - for attr, val in other_cache.__dict__.items(): - if attr in other_cache.cache_private or \ - re.match(self._cache_ignore, attr): - continue - setattr(self, attr, val) - - _share_cache[self._cache_hash] = self - atexit.register(self.cache_flush) - - def __del__(self): - for h, o in _share_cache.items(): - if o == self: - _share_cache.pop(h) - break - - def cache_flush(self): - """ - Force update the cache. 
- """ - if not self._cache_path: - return - # TODO: don't write if the cache doesn't change - self.dist_log("write cache to path ->", self._cache_path) - cdict = self.__dict__.copy() - for attr in self.__dict__.keys(): - if re.match(self._cache_ignore, attr): - cdict.pop(attr) - - d = os.path.dirname(self._cache_path) - if not os.path.exists(d): - os.makedirs(d) - - repr_dict = pprint.pformat(cdict, compact=True) - with open(self._cache_path, "w") as f: - f.write(textwrap.dedent("""\ - # AUTOGENERATED DON'T EDIT - # Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - hash = {} - data = \\ - """).format(self._cache_hash)) - f.write(repr_dict) - - def cache_hash(self, *factors): - # is there a built-in non-crypto hash? - # sdbm - chash = 0 - for f in factors: - for char in str(f): - chash = ord(char) + (chash << 6) + (chash << 16) - chash - chash &= 0xFFFFFFFF - return chash - - @staticmethod - def me(cb): - """ - A static method that can be treated as a decorator to - dynamically cache certain methods. - """ - def cache_wrap_me(self, *args, **kwargs): - # good for normal args - cache_key = str(( - cb.__name__, *args, *kwargs.keys(), *kwargs.values() - )) - if cache_key in self.cache_me: - return self.cache_me[cache_key] - ccb = cb(self, *args, **kwargs) - self.cache_me[cache_key] = ccb - return ccb - return cache_wrap_me - -class _CCompiler: - """A helper class for `CCompilerOpt` containing all utilities that - related to the fundamental compiler's functions. 
- - Attributes - ---------- - cc_on_x86 : bool - True when the target architecture is 32-bit x86 - cc_on_x64 : bool - True when the target architecture is 64-bit x86 - cc_on_ppc64 : bool - True when the target architecture is 64-bit big-endian powerpc - cc_on_ppc64le : bool - True when the target architecture is 64-bit litle-endian powerpc - cc_on_s390x : bool - True when the target architecture is IBM/ZARCH on linux - cc_on_armhf : bool - True when the target architecture is 32-bit ARMv7+ - cc_on_aarch64 : bool - True when the target architecture is 64-bit Armv8-a+ - cc_on_noarch : bool - True when the target architecture is unknown or not supported - cc_is_gcc : bool - True if the compiler is GNU or - if the compiler is unknown - cc_is_clang : bool - True if the compiler is Clang - cc_is_icc : bool - True if the compiler is Intel compiler (unix like) - cc_is_iccw : bool - True if the compiler is Intel compiler (msvc like) - cc_is_nocc : bool - True if the compiler isn't supported directly, - Note: that cause a fail-back to gcc - cc_has_debug : bool - True if the compiler has debug flags - cc_has_native : bool - True if the compiler has native flags - cc_noopt : bool - True if the compiler has definition 'DISABLE_OPT*', - or 'cc_on_noarch' is True - cc_march : str - The target architecture name, or "unknown" if - the architecture isn't supported - cc_name : str - The compiler name, or "unknown" if the compiler isn't supported - cc_flags : dict - Dictionary containing the initialized flags of `_Config.conf_cc_flags` - """ - def __init__(self): - if hasattr(self, "cc_is_cached"): - return - # attr regex compiler-expression - detect_arch = ( - ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), - ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), - ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__LITTLE_ENDIAN__)"), - ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__BIG_ENDIAN__)"), 
- ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), - ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " - "defined(__ARM_ARCH_7A__)"), - ("cc_on_s390x", ".*s390x.*", ""), - # undefined platform - ("cc_on_noarch", "", ""), - ) - detect_compiler = ( - ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), - ("cc_is_clang", ".*clang.*", ""), - # intel msvc like - ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), - ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like - ("cc_is_msvc", ".*msvc.*", ""), - ("cc_is_fcc", ".*fcc.*", ""), - # undefined compiler will be treat it as gcc - ("cc_is_nocc", "", ""), - ) - detect_args = ( - ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), - ("cc_has_native", - ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), - # in case if the class run with -DNPY_DISABLE_OPTIMIZATION - ("cc_noopt", ".*DISABLE_OPT.*", ""), - ) - - dist_info = self.dist_info() - platform, compiler_info, extra_args = dist_info - # set False to all attrs - for section in (detect_arch, detect_compiler, detect_args): - for attr, rgex, cexpr in section: - setattr(self, attr, False) - - for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): - for attr, rgex, cexpr in detect: - if rgex and not re.match(rgex, searchin, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - break - - for attr, rgex, cexpr in detect_args: - if rgex and not re.match(rgex, extra_args, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - - if self.cc_on_noarch: - self.dist_log( - "unable to detect CPU architecture which lead to disable the optimization. 
" - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_noopt = True - - if self.conf_noopt: - self.dist_log("Optimization is disabled by the Config", stderr=True) - self.cc_noopt = True - - if self.cc_is_nocc: - """ - mingw can be treated as a gcc, and also xlc even if it based on clang, - but still has the same gcc optimization flags. - """ - self.dist_log( - "unable to detect compiler type which leads to treating it as GCC. " - "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_is_gcc = True - - self.cc_march = "unknown" - for arch in ("x86", "x64", "ppc64", "ppc64le", - "armhf", "aarch64", "s390x"): - if getattr(self, "cc_on_" + arch): - self.cc_march = arch - break - - self.cc_name = "unknown" - for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): - if getattr(self, "cc_is_" + name): - self.cc_name = name - break - - self.cc_flags = {} - compiler_flags = self.conf_cc_flags.get(self.cc_name) - if compiler_flags is None: - self.dist_fatal( - "undefined flag for compiler '%s', " - "leave an empty dict instead" % self.cc_name - ) - for name, flags in compiler_flags.items(): - self.cc_flags[name] = nflags = [] - if flags: - assert(isinstance(flags, str)) - flags = flags.split() - for f in flags: - if self.cc_test_flags([f]): - nflags.append(f) - - self.cc_is_cached = True - - @_Cache.me - def cc_test_flags(self, flags): - """ - Returns True if the compiler supports 'flags'. - """ - assert(isinstance(flags, list)) - self.dist_log("testing flags", flags) - test_path = os.path.join(self.conf_check_path, "test_flags.c") - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def cc_test_cexpr(self, cexpr, flags=[]): - """ - Same as the above but supports compile-time expressions. 
- """ - self.dist_log("testing compiler expression", cexpr) - test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") - with open(test_path, "w") as fd: - fd.write(textwrap.dedent(f"""\ - #if !({cexpr}) - #error "unsupported expression" - #endif - int dummy; - """)) - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - def cc_normalize_flags(self, flags): - """ - Remove the conflicts that caused due gathering implied features flags. - - Parameters - ---------- - 'flags' list, compiler flags - flags should be sorted from the lowest to the highest interest. - - Returns - ------- - list, filtered from any conflicts. - - Examples - -------- - >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) - ['armv8.2-a+fp16+dotprod'] - - >>> self.cc_normalize_flags( - ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] - ) - ['-march=core-avx2'] - """ - assert(isinstance(flags, list)) - if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: - return self._cc_normalize_unix(flags) - - if self.cc_is_msvc or self.cc_is_iccw: - return self._cc_normalize_win(flags) - return flags - - _cc_normalize_unix_mrgx = re.compile( - # 1- to check the highest of - r"^(-mcpu=|-march=|-x[A-Z0-9\-])" - ) - _cc_normalize_unix_frgx = re.compile( - # 2- to remove any flags starts with - # -march, -mcpu, -x(INTEL) and '-m' without '=' - r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" - # exclude: - r"(?:-mzvector)" - ) - _cc_normalize_unix_krgx = re.compile( - # 3- keep only the highest of - r"^(-mfpu|-mtune)" - ) - _cc_normalize_arch_ver = re.compile( - r"[0-9.]" - ) - def _cc_normalize_unix(self, flags): - def ver_flags(f): - # arch ver subflag - # -march=armv8.2-a+fp16fml - tokens = f.split('+') - ver = float('0' + ''.join( - re.findall(self._cc_normalize_arch_ver, tokens[0]) - )) - return ver, tokens[0], tokens[1:] - - if len(flags) <= 
1: - return flags - # get the highest matched flag - for i, cur_flag in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_unix_mrgx, cur_flag): - continue - lower_flags = flags[:-(i+1)] - upper_flags = flags[-i:] - filtered = list(filter( - self._cc_normalize_unix_frgx.search, lower_flags - )) - # gather subflags - ver, arch, subflags = ver_flags(cur_flag) - if ver > 0 and len(subflags) > 0: - for xflag in lower_flags: - xver, _, xsubflags = ver_flags(xflag) - if ver == xver: - subflags = xsubflags + subflags - cur_flag = arch + '+' + '+'.join(subflags) - - flags = filtered + [cur_flag] - if i > 0: - flags += upper_flags - break - - # to remove overridable flags - final_flags = [] - matched = set() - for f in reversed(flags): - match = re.match(self._cc_normalize_unix_krgx, f) - if not match: - pass - elif match[0] in matched: - continue - else: - matched.add(match[0]) - final_flags.insert(0, f) - return final_flags - - _cc_normalize_win_frgx = re.compile( - r"^(?!(/arch\:|/Qx\:))" - ) - _cc_normalize_win_mrgx = re.compile( - r"^(/arch|/Qx:)" - ) - def _cc_normalize_win(self, flags): - for i, f in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_win_mrgx, f): - continue - i += 1 - return list(filter( - self._cc_normalize_win_frgx.search, flags[:-i] - )) + flags[-i:] - return flags - -class _Feature: - """A helper class for `CCompilerOpt` that managing CPU features. - - Attributes - ---------- - feature_supported : dict - Dictionary containing all CPU features that supported - by the platform, according to the specified values in attribute - `_Config.conf_features` and `_Config.conf_features_partial()` - - feature_min : set - The minimum support of CPU features, according to - the specified values in attribute `_Config.conf_min_features`. 
- """ - def __init__(self): - if hasattr(self, "feature_is_cached"): - return - self.feature_supported = pfeatures = self.conf_features_partial() - for feature_name in list(pfeatures.keys()): - feature = pfeatures[feature_name] - cfeature = self.conf_features[feature_name] - feature.update({ - k:v for k,v in cfeature.items() if k not in feature - }) - disabled = feature.get("disable") - if disabled is not None: - pfeatures.pop(feature_name) - self.dist_log( - "feature '%s' is disabled," % feature_name, - disabled, stderr=True - ) - continue - # list is used internally for these options - for option in ( - "implies", "group", "detect", "headers", "flags", "extra_checks" - ) : - oval = feature.get(option) - if isinstance(oval, str): - feature[option] = oval.split() - - self.feature_min = set() - min_f = self.conf_min_features.get(self.cc_march, "") - for F in min_f.upper().split(): - if F in self.feature_supported: - self.feature_min.add(F) - - self.feature_is_cached = True - - def feature_names(self, names=None, force_flags=None, macros=[]): - """ - Returns a set of CPU feature names that supported by platform and the **C** compiler. - - Parameters - ---------- - names : sequence or None, optional - Specify certain CPU features to test it against the **C** compiler. - if None(default), it will test all current supported features. - **Note**: feature names must be in upper-case. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during the test. - - macros : list of tuples, optional - A list of C macro definitions. 
- """ - assert( - names is None or ( - not isinstance(names, str) and - hasattr(names, "__iter__") - ) - ) - assert(force_flags is None or isinstance(force_flags, list)) - if names is None: - names = self.feature_supported.keys() - supported_names = set() - for f in names: - if self.feature_is_supported( - f, force_flags=force_flags, macros=macros - ): - supported_names.add(f) - return supported_names - - def feature_is_exist(self, name): - """ - Returns True if a certain feature is exist and covered within - ``_Config.conf_features``. - - Parameters - ---------- - 'name': str - feature name in uppercase. - """ - assert(name.isupper()) - return name in self.conf_features - - def feature_sorted(self, names, reverse=False): - """ - Sort a list of CPU features ordered by the lowest interest. - - Parameters - ---------- - 'names': sequence - sequence of supported feature names in uppercase. - 'reverse': bool, optional - If true, the sorted features is reversed. (highest interest) - - Returns - ------- - list, sorted CPU features - """ - def sort_cb(k): - if isinstance(k, str): - return self.feature_supported[k]["interest"] - # multiple features - rank = max([self.feature_supported[f]["interest"] for f in k]) - # FIXME: that's not a safe way to increase the rank for - # multi targets - rank += len(k) -1 - return rank - return sorted(names, reverse=reverse, key=sort_cb) - - def feature_implies(self, names, keep_origins=False): - """ - Return a set of CPU features that implied by 'names' - - Parameters - ---------- - names : str or sequence of str - CPU feature name(s) in uppercase. - - keep_origins : bool - if False(default) then the returned set will not contain any - features from 'names'. This case happens only when two features - imply each other. 
- - Examples - -------- - >>> self.feature_implies("SSE3") - {'SSE', 'SSE2'} - >>> self.feature_implies("SSE2") - {'SSE'} - >>> self.feature_implies("SSE2", keep_origins=True) - # 'SSE2' found here since 'SSE' and 'SSE2' imply each other - {'SSE', 'SSE2'} - """ - def get_implies(name, _caller=set()): - implies = set() - d = self.feature_supported[name] - for i in d.get("implies", []): - implies.add(i) - if i in _caller: - # infinity recursive guard since - # features can imply each other - continue - _caller.add(name) - implies = implies.union(get_implies(i, _caller)) - return implies - - if isinstance(names, str): - implies = get_implies(names) - names = [names] - else: - assert(hasattr(names, "__iter__")) - implies = set() - for n in names: - implies = implies.union(get_implies(n)) - if not keep_origins: - implies.difference_update(names) - return implies - - def feature_implies_c(self, names): - """same as feature_implies() but combining 'names'""" - if isinstance(names, str): - names = set((names,)) - else: - names = set(names) - return names.union(self.feature_implies(names)) - - def feature_ahead(self, names): - """ - Return list of features in 'names' after remove any - implied features and keep the origins. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. 
- - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) - ["SSE41"] - # assume AVX2 and FMA3 implies each other and AVX2 - # is the highest interest - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2"] - # assume AVX2 and FMA3 don't implies each other - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2", "FMA3"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - implies = self.feature_implies(names, keep_origins=True) - ahead = [n for n in names if n not in implies] - if len(ahead) == 0: - # return the highest interested feature - # if all features imply each other - ahead = self.feature_sorted(names, reverse=True)[:1] - return ahead - - def feature_untied(self, names): - """ - same as 'feature_ahead()' but if both features implied each other - and keep the highest interest. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. - - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) - ["SSE2", "SSE3", "SSE41"] - # assume AVX2 and FMA3 implies each other - >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) - ["SSE2", "SSE3", "SSE41", "AVX2"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - final = [] - for n in names: - implies = self.feature_implies(n) - tied = [ - nn for nn in final - if nn in implies and n in self.feature_implies(nn) - ] - if tied: - tied = self.feature_sorted(tied + [n]) - if n not in tied[1:]: - continue - final.remove(tied[:1][0]) - final.append(n) - return final - - def feature_get_til(self, names, keyisfalse): - """ - same as `feature_implies_c()` but stop collecting implied - features when feature's option that provided through - parameter 'keyisfalse' is False, also sorting the returned - features. 
- """ - def til(tnames): - # sort from highest to lowest interest then cut if "key" is False - tnames = self.feature_implies_c(tnames) - tnames = self.feature_sorted(tnames, reverse=True) - for i, n in enumerate(tnames): - if not self.feature_supported[n].get(keyisfalse, True): - tnames = tnames[:i+1] - break - return tnames - - if isinstance(names, str) or len(names) <= 1: - names = til(names) - # normalize the sort - names.reverse() - return names - - names = self.feature_ahead(names) - names = {t for n in names for t in til(n)} - return self.feature_sorted(names) - - def feature_detect(self, names): - """ - Return a list of CPU features that required to be detected - sorted from the lowest to highest interest. - """ - names = self.feature_get_til(names, "implies_detect") - detect = [] - for n in names: - d = self.feature_supported[n] - detect += d.get("detect", d.get("group", [n])) - return detect - - @_Cache.me - def feature_flags(self, names): - """ - Return a list of CPU features flags sorted from the lowest - to highest interest. - """ - names = self.feature_sorted(self.feature_implies_c(names)) - flags = [] - for n in names: - d = self.feature_supported[n] - f = d.get("flags", []) - if not f or not self.cc_test_flags(f): - continue - flags += f - return self.cc_normalize_flags(flags) - - @_Cache.me - def feature_test(self, name, force_flags=None, macros=[]): - """ - Test a certain CPU feature against the compiler through its own - check file. - - Parameters - ---------- - name : str - Supported CPU feature name. - - force_flags : list or None, optional - If None(default), the returned flags from `feature_flags()` - will be used. - - macros : list of tuples, optional - A list of C macro definitions. 
- """ - if force_flags is None: - force_flags = self.feature_flags(name) - - self.dist_log( - "testing feature '%s' with flags (%s)" % ( - name, ' '.join(force_flags) - )) - # Each CPU feature must have C source code contains at - # least one intrinsic or instruction related to this feature. - test_path = os.path.join( - self.conf_check_path, "cpu_%s.c" % name.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("feature test file is not exist", test_path) - - test = self.dist_test( - test_path, force_flags + self.cc_flags["werror"], macros=macros - ) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def feature_is_supported(self, name, force_flags=None, macros=[]): - """ - Check if a certain CPU feature is supported by the platform and compiler. - - Parameters - ---------- - name : str - CPU feature name in uppercase. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during test. - - macros : list of tuples, optional - A list of C macro definitions. - """ - assert(name.isupper()) - assert(force_flags is None or isinstance(force_flags, list)) - - supported = name in self.feature_supported - if supported: - for impl in self.feature_implies(name): - if not self.feature_test(impl, force_flags, macros=macros): - return False - if not self.feature_test(name, force_flags, macros=macros): - return False - return supported - - @_Cache.me - def feature_can_autovec(self, name): - """ - check if the feature can be auto-vectorized by the compiler - """ - assert(isinstance(name, str)) - d = self.feature_supported[name] - can = d.get("autovec", None) - if can is None: - valid_flags = [ - self.cc_test_flags([f]) for f in d.get("flags", []) - ] - can = valid_flags and any(valid_flags) - return can - - @_Cache.me - def feature_extra_checks(self, name): - """ - Return a list of supported extra checks after testing them against - the compiler. 
- - Parameters - ---------- - names : str - CPU feature name in uppercase. - """ - assert isinstance(name, str) - d = self.feature_supported[name] - extra_checks = d.get("extra_checks", []) - if not extra_checks: - return [] - - self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) - flags = self.feature_flags(name) - available = [] - not_available = [] - for chk in extra_checks: - test_path = os.path.join( - self.conf_check_path, "extra_%s.c" % chk.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("extra check file does not exist", test_path) - - is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) - if is_supported: - available.append(chk) - else: - not_available.append(chk) - - if not_available: - self.dist_log("testing failed for checks", not_available, stderr=True) - return available - - - def feature_c_preprocessor(self, feature_name, tabs=0): - """ - Generate C preprocessor definitions and include headers of a CPU feature. - - Parameters - ---------- - 'feature_name': str - CPU feature name in uppercase. - 'tabs': int - if > 0, align the generated strings to the right depend on number of tabs. 
- - Returns - ------- - str, generated C preprocessor - - Examples - -------- - >>> self.feature_c_preprocessor("SSE3") - /** SSE3 **/ - #define NPY_HAVE_SSE3 1 - #include - """ - assert(feature_name.isupper()) - feature = self.feature_supported.get(feature_name) - assert(feature is not None) - - prepr = [ - "/** %s **/" % feature_name, - "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) - ] - prepr += [ - "#include <%s>" % h for h in feature.get("headers", []) - ] - - extra_defs = feature.get("group", []) - extra_defs += self.feature_extra_checks(feature_name) - for edef in extra_defs: - # Guard extra definitions in case of duplicate with - # another feature - prepr += [ - "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), - "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), - "#endif", - ] - - if tabs > 0: - prepr = [('\t'*tabs) + l for l in prepr] - return '\n'.join(prepr) - -class _Parse: - """A helper class that parsing main arguments of `CCompilerOpt`, - also parsing configuration statements in dispatch-able sources. - - Parameters - ---------- - cpu_baseline : str or None - minimal set of required CPU features or special options. - - cpu_dispatch : str or None - dispatched set of additional CPU features or special options. - - Special options can be: - - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` - - **MAX**: Enables all supported CPU features by the Compiler and platform. - - **NATIVE**: Enables all CPU features that supported by the current machine. - - **NONE**: Enables nothing - - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. - NOTE: operand + is only added for nominal reason. - - NOTES: - - Case-insensitive among all CPU features and special options. - - Comma or space can be used as a separator. - - If the CPU feature is not supported by the user platform or compiler, - it will be skipped rather than raising a fatal error. 
- - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features - - 'cpu_baseline' force enables implied features. - - Attributes - ---------- - parse_baseline_names : list - Final CPU baseline's feature names(sorted from low to high) - parse_baseline_flags : list - Compiler flags of baseline features - parse_dispatch_names : list - Final CPU dispatch-able feature names(sorted from low to high) - parse_target_groups : dict - Dictionary containing initialized target groups that configured - through class attribute `conf_target_groups`. - - The key is represent the group name and value is a tuple - contains three items : - - bool, True if group has the 'baseline' option. - - list, list of CPU features. - - list, list of extra compiler flags. - - """ - def __init__(self, cpu_baseline, cpu_dispatch): - self._parse_policies = dict( - # POLICY NAME, (HAVE, NOT HAVE, [DEB]) - KEEP_BASELINE = ( - None, self._parse_policy_not_keepbase, - [] - ), - KEEP_SORT = ( - self._parse_policy_keepsort, - self._parse_policy_not_keepsort, - [] - ), - MAXOPT = ( - self._parse_policy_maxopt, None, - [] - ), - WERROR = ( - self._parse_policy_werror, None, - [] - ), - AUTOVEC = ( - self._parse_policy_autovec, None, - ["MAXOPT"] - ) - ) - if hasattr(self, "parse_is_cached"): - return - - self.parse_baseline_names = [] - self.parse_baseline_flags = [] - self.parse_dispatch_names = [] - self.parse_target_groups = {} - - if self.cc_noopt: - # skip parsing baseline and dispatch args and keep parsing target groups - cpu_baseline = cpu_dispatch = None - - self.dist_log("check requested baseline") - if cpu_baseline is not None: - cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) - baseline_names = self.feature_names(cpu_baseline) - self.parse_baseline_flags = self.feature_flags(baseline_names) - self.parse_baseline_names = self.feature_sorted( - self.feature_implies_c(baseline_names) - ) - - self.dist_log("check requested dispatch-able 
features") - if cpu_dispatch is not None: - cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) - cpu_dispatch = { - f for f in cpu_dispatch_ - if f not in self.parse_baseline_names - } - conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) - self.parse_dispatch_names = self.feature_sorted( - self.feature_names(cpu_dispatch) - ) - if len(conflict_baseline) > 0: - self.dist_log( - "skip features", conflict_baseline, "since its part of baseline" - ) - - self.dist_log("initialize targets groups") - for group_name, tokens in self.conf_target_groups.items(): - self.dist_log("parse target group", group_name) - GROUP_NAME = group_name.upper() - if not tokens or not tokens.strip(): - # allow empty groups, useful in case if there's a need - # to disable certain group since '_parse_target_tokens()' - # requires at least one valid target - self.parse_target_groups[GROUP_NAME] = ( - False, [], [] - ) - continue - has_baseline, features, extra_flags = \ - self._parse_target_tokens(tokens) - self.parse_target_groups[GROUP_NAME] = ( - has_baseline, features, extra_flags - ) - - self.parse_is_cached = True - - def parse_targets(self, source): - """ - Fetch and parse configuration statements that required for - defining the targeted CPU features, statements should be declared - in the top of source in between **C** comment and start - with a special mark **@targets**. - - Configuration statements are sort of keywords representing - CPU features names, group of statements and policies, combined - together to determine the required optimization. - - Parameters - ---------- - source : str - the path of **C** source file. - - Returns - ------- - - bool, True if group has the 'baseline' option - - list, list of CPU features - - list, list of extra compiler flags - """ - self.dist_log("looking for '@targets' inside -> ", source) - # get lines between /*@targets and */ - with open(source) as fd: - tokens = "" - max_to_reach = 1000 # good enough, isn't? 
- start_with = "@targets" - start_pos = -1 - end_with = "*/" - end_pos = -1 - for current_line, line in enumerate(fd): - if current_line == max_to_reach: - self.dist_fatal("reached the max of lines") - break - if start_pos == -1: - start_pos = line.find(start_with) - if start_pos == -1: - continue - start_pos += len(start_with) - tokens += line - end_pos = line.find(end_with) - if end_pos != -1: - end_pos += len(tokens) - len(line) - break - - if start_pos == -1: - self.dist_fatal("expected to find '%s' within a C comment" % start_with) - if end_pos == -1: - self.dist_fatal("expected to end with '%s'" % end_with) - - tokens = tokens[start_pos:end_pos] - return self._parse_target_tokens(tokens) - - _parse_regex_arg = re.compile(r'\s|,|([+-])') - def _parse_arg_features(self, arg_name, req_features): - if not isinstance(req_features, str): - self.dist_fatal("expected a string in '%s'" % arg_name) - - final_features = set() - # space and comma can be used as a separator - tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) - append = True # append is the default - for tok in tokens: - if tok[0] in ("#", "$"): - self.dist_fatal( - arg_name, "target groups and policies " - "aren't allowed from arguments, " - "only from dispatch-able sources" - ) - if tok == '+': - append = True - continue - if tok == '-': - append = False - continue - - TOK = tok.upper() # we use upper-case internally - features_to = set() - if TOK == "NONE": - pass - elif TOK == "NATIVE": - native = self.cc_flags["native"] - if not native: - self.dist_fatal(arg_name, - "native option isn't supported by the compiler" - ) - features_to = self.feature_names( - force_flags=native, macros=[("DETECT_FEATURES", 1)] - ) - elif TOK == "MAX": - features_to = self.feature_supported.keys() - elif TOK == "MIN": - features_to = self.feature_min - else: - if TOK in self.feature_supported: - features_to.add(TOK) - else: - if not self.feature_is_exist(TOK): - self.dist_fatal(arg_name, - ", '%s' 
isn't a known feature or option" % tok - ) - if append: - final_features = final_features.union(features_to) - else: - final_features = final_features.difference(features_to) - - append = True # back to default - - return final_features - - _parse_regex_target = re.compile(r'\s|[*,/]|([()])') - def _parse_target_tokens(self, tokens): - assert(isinstance(tokens, str)) - final_targets = [] # to keep it sorted as specified - extra_flags = [] - has_baseline = False - - skipped = set() - policies = set() - multi_target = None - - tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) - if not tokens: - self.dist_fatal("expected one token at least") - - for tok in tokens: - TOK = tok.upper() - ch = tok[0] - if ch in ('+', '-'): - self.dist_fatal( - "+/- are 'not' allowed from target's groups or @targets, " - "only from cpu_baseline and cpu_dispatch parms" - ) - elif ch == '$': - if multi_target is not None: - self.dist_fatal( - "policies aren't allowed inside multi-target '()'" - ", only CPU features" - ) - policies.add(self._parse_token_policy(TOK)) - elif ch == '#': - if multi_target is not None: - self.dist_fatal( - "target groups aren't allowed inside multi-target '()'" - ", only CPU features" - ) - has_baseline, final_targets, extra_flags = \ - self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) - elif ch == '(': - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - multi_target = set() - elif ch == ')': - if multi_target is None: - self.dist_fatal("multi-target opener '(' wasn't found") - targets = self._parse_multi_target(multi_target) - if targets is None: - skipped.add(tuple(multi_target)) - else: - if len(targets) == 1: - targets = targets[0] - if targets and targets not in final_targets: - final_targets.append(targets) - multi_target = None # back to default - else: - if TOK == "BASELINE": - if multi_target is not None: - self.dist_fatal("baseline isn't allowed inside multi-target '()'") - 
has_baseline = True - continue - - if multi_target is not None: - multi_target.add(TOK) - continue - - if not self.feature_is_exist(TOK): - self.dist_fatal("invalid target name '%s'" % TOK) - - is_enabled = ( - TOK in self.parse_baseline_names or - TOK in self.parse_dispatch_names - ) - if is_enabled: - if TOK not in final_targets: - final_targets.append(TOK) - continue - - skipped.add(TOK) - - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - if skipped: - self.dist_log( - "skip targets", skipped, - "not part of baseline or dispatch-able features" - ) - - final_targets = self.feature_untied(final_targets) - - # add polices dependencies - for p in list(policies): - _, _, deps = self._parse_policies[p] - for d in deps: - if d in policies: - continue - self.dist_log( - "policy '%s' force enables '%s'" % ( - p, d - )) - policies.add(d) - - # release policies filtrations - for p, (have, nhave, _) in self._parse_policies.items(): - func = None - if p in policies: - func = have - self.dist_log("policy '%s' is ON" % p) - else: - func = nhave - if not func: - continue - has_baseline, final_targets, extra_flags = func( - has_baseline, final_targets, extra_flags - ) - - return has_baseline, final_targets, extra_flags - - def _parse_token_policy(self, token): - """validate policy token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'$' must stuck in the begin of policy name") - token = token[1:] - if token not in self._parse_policies: - self.dist_fatal( - "'%s' is an invalid policy name, available policies are" % token, - self._parse_policies.keys() - ) - return token - - def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): - """validate group token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'#' must stuck in the begin of group name") - - token = token[1:] - ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( - token, (False, None, []) - ) - if 
gtargets is None: - self.dist_fatal( - "'%s' is an invalid target group name, " % token + \ - "available target groups are", - self.parse_target_groups.keys() - ) - if ghas_baseline: - has_baseline = True - # always keep sorting as specified - final_targets += [f for f in gtargets if f not in final_targets] - extra_flags += [f for f in gextra_flags if f not in extra_flags] - return has_baseline, final_targets, extra_flags - - def _parse_multi_target(self, targets): - """validate multi targets that defined between parentheses()""" - # remove any implied features and keep the origins - if not targets: - self.dist_fatal("empty multi-target '()'") - if not all([ - self.feature_is_exist(tar) for tar in targets - ]) : - self.dist_fatal("invalid target name in multi-target", targets) - if not all([ - ( - tar in self.parse_baseline_names or - tar in self.parse_dispatch_names - ) - for tar in targets - ]) : - return None - targets = self.feature_ahead(targets) - if not targets: - return None - # force sort multi targets, so it can be comparable - targets = self.feature_sorted(targets) - targets = tuple(targets) # hashable - return targets - - def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): - """skip all baseline features""" - skipped = [] - for tar in final_targets[:]: - is_base = False - if isinstance(tar, str): - is_base = tar in self.parse_baseline_names - else: - # multi targets - is_base = all([ - f in self.parse_baseline_names - for f in tar - ]) - if is_base: - skipped.append(tar) - final_targets.remove(tar) - - if skipped: - self.dist_log("skip baseline features", skipped) - - return has_baseline, final_targets, extra_flags - - def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): - """leave a notice that $keep_sort is on""" - self.dist_log( - "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" - "are 'not' sorted depend on the highest interest but" - "as specified in the dispatch-able source 
or the extra group" - ) - return has_baseline, final_targets, extra_flags - - def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): - """sorted depend on the highest interest""" - final_targets = self.feature_sorted(final_targets, reverse=True) - return has_baseline, final_targets, extra_flags - - def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): - """append the compiler optimization flags""" - if self.cc_has_debug: - self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") - elif self.cc_noopt: - self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") - else: - flags = self.cc_flags["opt"] - if not flags: - self.dist_log( - "current compiler doesn't support optimization flags, " - "policy 'maxopt' is skipped", stderr=True - ) - else: - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): - """force warnings to treated as errors""" - flags = self.cc_flags["werror"] - if not flags: - self.dist_log( - "current compiler doesn't support werror flags, " - "warnings will 'not' treated as errors", stderr=True - ) - else: - self.dist_log("compiler warnings are treated as errors") - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): - """skip features that has no auto-vectorized support by compiler""" - skipped = [] - for tar in final_targets[:]: - if isinstance(tar, str): - can = self.feature_can_autovec(tar) - else: # multiple target - can = all([ - self.feature_can_autovec(t) - for t in tar - ]) - if not can: - final_targets.remove(tar) - skipped.append(tar) - - if skipped: - self.dist_log("skip non auto-vectorized features", skipped) - - return has_baseline, final_targets, extra_flags - -class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): - """ - A helper class for `CCompiler` 
aims to provide extra build options - to effectively control of compiler optimizations that are directly - related to CPU features. - """ - def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): - _Config.__init__(self) - _Distutils.__init__(self, ccompiler) - _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) - _CCompiler.__init__(self) - _Feature.__init__(self) - if not self.cc_noopt and self.cc_has_native: - self.dist_log( - "native flag is specified through environment variables. " - "force cpu-baseline='native'" - ) - cpu_baseline = "native" - _Parse.__init__(self, cpu_baseline, cpu_dispatch) - # keep the requested features untouched, need it later for report - # and trace purposes - self._requested_baseline = cpu_baseline - self._requested_dispatch = cpu_dispatch - # key is the dispatch-able source and value is a tuple - # contains two items (has_baseline[boolean], dispatched-features[list]) - self.sources_status = getattr(self, "sources_status", {}) - # every instance should has a separate one - self.cache_private.add("sources_status") - # set it at the end to make sure the cache writing was done after init - # this class - self.hit_cache = hasattr(self, "hit_cache") - - def is_cached(self): - """ - Returns True if the class loaded from the cache file - """ - return self.cache_infile and self.hit_cache - - def cpu_baseline_flags(self): - """ - Returns a list of final CPU baseline compiler flags - """ - return self.parse_baseline_flags - - def cpu_baseline_names(self): - """ - return a list of final CPU baseline feature names - """ - return self.parse_baseline_names - - def cpu_dispatch_names(self): - """ - return a list of final CPU dispatch feature names - """ - return self.parse_dispatch_names - - def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): - """ - Compile one or more dispatch-able sources and generates object files, - also generates abstract C config headers and 
macros that - used later for the final runtime dispatching process. - - The mechanism behind it is to takes each source file that specified - in 'sources' and branching it into several files depend on - special configuration statements that must be declared in the - top of each source which contains targeted CPU features, - then it compiles every branched source with the proper compiler flags. - - Parameters - ---------- - sources : list - Must be a list of dispatch-able sources file paths, - and configuration statements must be declared inside - each file. - - src_dir : str - Path of parent directory for the generated headers and wrapped sources. - If None(default) the files will generated in-place. - - ccompiler : CCompiler - Distutils `CCompiler` instance to be used for compilation. - If None (default), the provided instance during the initialization - will be used instead. - - **kwargs : any - Arguments to pass on to the `CCompiler.compile()` - - Returns - ------- - list : generated object files - - Raises - ------ - CompileError - Raises by `CCompiler.compile()` on compiling failure. - DistutilsError - Some errors during checking the sanity of configuration statements. - - See Also - -------- - parse_targets : - Parsing the configuration statements of dispatch-able sources. 
- """ - to_compile = {} - baseline_flags = self.cpu_baseline_flags() - include_dirs = kwargs.setdefault("include_dirs", []) - - for src in sources: - output_dir = os.path.dirname(src) - if src_dir: - if not output_dir.startswith(src_dir): - output_dir = os.path.join(src_dir, output_dir) - if output_dir not in include_dirs: - # To allow including the generated config header(*.dispatch.h) - # by the dispatch-able sources - include_dirs.append(output_dir) - - has_baseline, targets, extra_flags = self.parse_targets(src) - nochange = self._generate_config(output_dir, src, targets, has_baseline) - for tar in targets: - tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) - flags = tuple(extra_flags + self.feature_flags(tar)) - to_compile.setdefault(flags, []).append(tar_src) - - if has_baseline: - flags = tuple(extra_flags + baseline_flags) - to_compile.setdefault(flags, []).append(src) - - self.sources_status[src] = (has_baseline, targets) - - # For these reasons, the sources are compiled in a separate loop: - # - Gathering all sources with the same flags to benefit from - # the parallel compiling as much as possible. - # - To generate all config headers of the dispatchable sources, - # before the compilation in case if there are dependency relationships - # among them. - objects = [] - for flags, srcs in to_compile.items(): - objects += self.dist_compile( - srcs, list(flags), ccompiler=ccompiler, **kwargs - ) - return objects - - def generate_dispatch_header(self, header_path): - """ - Generate the dispatch header which contains the #definitions and headers - for platform-specific instruction-sets for the enabled CPU baseline and - dispatch-able features. - - Its highly recommended to take a look at the generated header - also the generated source files via `try_dispatch()` - in order to get the full picture. 
- """ - self.dist_log("generate CPU dispatch header: (%s)" % header_path) - - baseline_names = self.cpu_baseline_names() - dispatch_names = self.cpu_dispatch_names() - baseline_len = len(baseline_names) - dispatch_len = len(dispatch_names) - - header_dir = os.path.dirname(header_path) - if not os.path.exists(header_dir): - self.dist_log( - f"dispatch header dir {header_dir} does not exist, creating it", - stderr=True - ) - os.makedirs(header_dir) - - with open(header_path, 'w') as f: - baseline_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in baseline_names - ]) - dispatch_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in dispatch_names - ]) - f.write(textwrap.dedent("""\ - /* - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #define {pfx}WITH_CPU_BASELINE "{baseline_str}" - #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" - #define {pfx}WITH_CPU_BASELINE_N {baseline_len} - #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} - #define {pfx}WITH_CPU_EXPAND_(X) X - #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\ - {baseline_calls} - #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) 
\\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), - dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, - dispatch_len=dispatch_len, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls - )) - baseline_pre = '' - for name in baseline_names: - baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' - - dispatch_pre = '' - for name in dispatch_names: - dispatch_pre += textwrap.dedent("""\ - #ifdef {pfx}CPU_TARGET_{name} - {pre} - #endif /*{pfx}CPU_TARGET_{name}*/ - """).format( - pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( - name, tabs=1 - )) - - f.write(textwrap.dedent("""\ - /******* baseline features *******/ - {baseline_pre} - /******* dispatch features *******/ - {dispatch_pre} - """).format( - pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, - dispatch_pre=dispatch_pre - )) - - def report(self, full=False): - report = [] - platform_rows = [] - baseline_rows = [] - dispatch_rows = [] - report.append(("Platform", platform_rows)) - report.append(("", "")) - report.append(("CPU baseline", baseline_rows)) - report.append(("", "")) - report.append(("CPU dispatch", dispatch_rows)) - - ########## platform ########## - platform_rows.append(("Architecture", ( - "unsupported" if self.cc_on_noarch else self.cc_march) - )) - platform_rows.append(("Compiler", ( - "unix-like" if self.cc_is_nocc else self.cc_name) - )) - ########## baseline ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - baseline_rows.append(("Requested", repr(self._requested_baseline))) - - baseline_names = self.cpu_baseline_names() - baseline_rows.append(( - "Enabled", (' '.join(baseline_names) if baseline_names else "none") - )) - baseline_flags = self.cpu_baseline_flags() - baseline_rows.append(( - "Flags", (' '.join(baseline_flags) if baseline_flags else "none") - )) - extra_checks = [] - for name in baseline_names: - extra_checks += 
self.feature_extra_checks(name) - baseline_rows.append(( - "Extra checks", (' '.join(extra_checks) if extra_checks else "none") - )) - - ########## dispatch ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - dispatch_rows.append(("Requested", repr(self._requested_dispatch))) - - dispatch_names = self.cpu_dispatch_names() - dispatch_rows.append(( - "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") - )) - ########## Generated ########## - # TODO: - # - collect object names from 'try_dispatch()' - # then get size of each object and printed - # - give more details about the features that not - # generated due compiler support - # - find a better output's design. - # - target_sources = {} - for source, (_, targets) in self.sources_status.items(): - for tar in targets: - target_sources.setdefault(tar, []).append(source) - - if not full or not target_sources: - generated = "" - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - generated += name + "[%d] " % len(sources) - dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) - else: - dispatch_rows.append(("Generated", '')) - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - flags = ' '.join(self.feature_flags(tar)) - implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) - detect = ' '.join(self.feature_detect(tar)) - extra_checks = [] - for name in ((tar,) if isinstance(tar, str) else tar): - extra_checks += self.feature_extra_checks(name) - extra_checks = (' '.join(extra_checks) if extra_checks else "none") - - dispatch_rows.append(('', '')) - dispatch_rows.append((pretty_name, implies)) - dispatch_rows.append(("Flags", flags)) - dispatch_rows.append(("Extra checks", extra_checks)) - dispatch_rows.append(("Detect", 
detect)) - for src in sources: - dispatch_rows.append(("", src)) - - ############################### - # TODO: add support for 'markdown' format - text = [] - secs_len = [len(secs) for secs, _ in report] - cols_len = [len(col) for _, rows in report for col, _ in rows] - tab = ' ' * 2 - pad = max(max(secs_len), max(cols_len)) - for sec, rows in report: - if not sec: - text.append("") # empty line - continue - sec += ' ' * (pad - len(sec)) - text.append(sec + tab + ': ') - for col, val in rows: - col += ' ' * (pad - len(col)) - text.append(tab + col + ': ' + val) - - return '\n'.join(text) - - def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): - assert(isinstance(target, (str, tuple))) - if isinstance(target, str): - ext_name = target_name = target - else: - # multi-target - ext_name = '.'.join(target) - target_name = '__'.join(target) - - wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) - wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) - if nochange and os.path.exists(wrap_path): - return wrap_path - - self.dist_log("wrap dispatch-able target -> ", wrap_path) - # sorting for readability - features = self.feature_sorted(self.feature_implies_c(target)) - target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ - target_defs = [target_join + f for f in features] - target_defs = '\n'.join(target_defs) - - with open(wrap_path, "w") as fd: - fd.write(textwrap.dedent("""\ - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - */ - #define {pfx}CPU_TARGET_MODE - #define {pfx}CPU_TARGET_CURRENT {target_name} - {target_defs} - #include "{path}" - """).format( - pfx=self.conf_c_prefix_, target_name=target_name, - path=os.path.abspath(dispatch_src), target_defs=target_defs - )) - return wrap_path - - def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): - config_path = os.path.basename(dispatch_src) - config_path 
= os.path.splitext(config_path)[0] + '.h' - config_path = os.path.join(output_dir, config_path) - # check if targets didn't change to avoid recompiling - cache_hash = self.cache_hash(targets, has_baseline) - try: - with open(config_path) as f: - last_hash = f.readline().split("cache_hash:") - if len(last_hash) == 2 and int(last_hash[1]) == cache_hash: - return True - except OSError: - pass - - os.makedirs(os.path.dirname(config_path), exist_ok=True) - - self.dist_log("generate dispatched config -> ", config_path) - dispatch_calls = [] - for tar in targets: - if isinstance(tar, str): - target_name = tar - else: # multi target - target_name = '__'.join([t for t in tar]) - req_detect = self.feature_detect(tar) - req_detect = '&&'.join([ - "CHK(%s)" % f for f in req_detect - ]) - dispatch_calls.append( - "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % ( - self.conf_c_prefix_, req_detect, target_name - )) - dispatch_calls = ' \\\n'.join(dispatch_calls) - - if has_baseline: - baseline_calls = ( - "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))" - ) % self.conf_c_prefix_ - else: - baseline_calls = '' - - with open(config_path, "w") as fd: - fd.write(textwrap.dedent("""\ - // cache_hash:{cache_hash} - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #ifndef {pfx}CPU_DISPATCH_EXPAND_ - #define {pfx}CPU_DISPATCH_EXPAND_(X) X - #endif - #undef {pfx}CPU_DISPATCH_BASELINE_CALL - #undef {pfx}CPU_DISPATCH_CALL - #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\ - {baseline_calls} - #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) 
\\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix_, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls, cache_hash=cache_hash - )) - return False - -def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): - """ - Create a new instance of 'CCompilerOpt' and generate the dispatch header - which contains the #definitions and headers of platform-specific instruction-sets for - the enabled CPU baseline and dispatch-able features. - - Parameters - ---------- - compiler : CCompiler instance - dispatch_hpath : str - path of the dispatch header - - **kwargs: passed as-is to `CCompilerOpt(...)` - Returns - ------- - new instance of CCompilerOpt - """ - opt = CCompilerOpt(compiler, **kwargs) - if not os.path.exists(dispatch_hpath) or not opt.is_cached(): - opt.generate_dispatch_header(dispatch_hpath) - return opt diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py deleted file mode 100644 index 3ba501de03b6..000000000000 --- a/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands. 
- -""" -def test_na_writable_attributes_deletion(): - a = np.NA(2) - attr = ['payload', 'dtype'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command', globals(), locals(), distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py deleted file mode 100644 index b72d0cab1a7d..000000000000 --- a/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,148 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful. - -""" -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc(cmd): - """Check if the compiler is GCC.""" - - cmd._check_compiler() - body 
= textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) - #error gcc required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0): - """ - Check that the gcc version is at least the specified version.""" - - cmd._check_compiler() - version = '.'.join([str(major), str(minor), str(patchlevel)]) - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\ - (__GNUC_MINOR__ < %(minor)d) || \\ - (__GNUC_PATCHLEVEL__ < %(patchlevel)d) - #error gcc >= %(version)s required - #endif - return 0; - } - """) - kw = {'version': version, 'major': major, 'minor': minor, - 'patchlevel': patchlevel} - - return cmd.try_compile(body % kw, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void* unused) - { - return 0; - } - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return 
cmd.try_compile(body, None, None) != 0 diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 682e7a8eb8e2..000000000000 --- a/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py deleted file mode 100644 index 80830d559c61..000000000000 --- a/numpy/distutils/command/build.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested 
against NumPy SIMD interface"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - self.cpu_baseline = "min" - self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default - self.disable_optimization = False - """ - the '_simd' module is a very large. Adding more dispatched features - will increase binary size and compile time. By default we minimize - the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), - NOTE: any specified features will be ignored if they're: - - part of the baseline(--cpu-baseline) - - not part of dispatch-able features(--cpu-dispatch) - - not supported by compiler or platform - """ - self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \ - "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2" - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py deleted file mode 100644 index 26e2f4ed0f4a..000000000000 --- a/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,469 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import ( - filter_sources, get_lib_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.ccompiler_opt import new_ccompiler_opt - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ] - - boolean_options = old_build_clib.boolean_options + \ - ['inplace', 'warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 
'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization') - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. - self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of 
`CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def assemble_flags(self, in_flags): - """ Assemble flags from flag list - - Parameters - ---------- - in_flags : None or sequence - None corresponds to empty list. Sequence elements can be strings - or callables that return lists of strings. Callable takes `self` as - single parameter. 
- - Returns - ------- - out_flags : list - """ - if in_flags is None: - return [] - out_flags = [] - for in_flag in in_flags: - if callable(in_flag): - out_flags += in_flag(self) - else: - out_flags.append(in_flag) - return out_flags - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - 
verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - if macros is None: - macros = [] - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - # Flags can be strings, or callables that return a list of strings. - extra_postargs = self.assemble_flags( - build_info.get('extra_compiler_args')) - extra_cflags = self.assemble_flags( - build_info.get('extra_cflags')) - extra_cxxflags = self.assemble_flags( - build_info.get('extra_cxxflags')) - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: - self.mkpath(module_build_dir) - - if compiler.compiler_type == 'msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - extra_cflags += extra_cxxflags - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. 
- copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - # copt_build_src = None if self.inplace else bsrc_dir - copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + extra_cxxflags, - ccompiler=cxx_compiler - ) - - if copt_c_sources: - log.info("compiling C dispatch-able sources") - objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + extra_cflags) - - if c_sources: - log.info("compiling C sources") - objects += compiler.compile( - c_sources, - output_dir=self.build_temp, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_postargs + - copt_baseline_flags + - extra_cflags)) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile( - cxx_sources, - output_dir=self.build_temp, - macros=macros + 
copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_postargs + - copt_baseline_flags + - extra_cxxflags)) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. 
- listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py deleted file mode 100644 index 42137e5f859d..000000000000 --- a/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,752 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import ( - filter_sources, get_ext_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.command.config_compiler import show_fortran_compilers -from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - self.simd_test = None - - def 
finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. - # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization'), - ('simd_test', 'simd_test') - ) - CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread 
without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### EXT COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. 
- all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - - # reset language attribute for choosing proper linker - # - # When we build extensions with multiple languages, we have to - # choose a linker. The rules here are: - # 1. if there is Fortran code, always prefer the Fortran linker, - # 2. otherwise prefer C++ over C, - # 3. Users can force a particular linker by using - # `language='c'` # or 'c++', 'f90', 'f77' - # in their config.add_extension() calls. - if 'c++' in ext_languages: - ext_language = 'c++' - else: - ext_language = 'c' # default - - has_fortran = False - if 'f90' in ext_languages: - ext_language = 'f90' - has_fortran = True - elif 'f77' in ext_languages: - ext_language = 'f77' - has_fortran = True - - if not ext.language or has_fortran: - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name, l, ext_language)) - - ext.language = ext_language - - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' 
% - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. 
- return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - extra_cflags = getattr(ext, 'extra_c_compile_args', None) or [] - extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. - extra_args.append('/Zm1000') - extra_cflags += extra_cxxflags - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. 
- if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. 
- copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - - # copt_build_src = None if self.inplace else bsrc_dir - # Always generate the generated config files and - # dispatch-able sources inside the build directory, - # even if the build option `inplace` is enabled. - # This approach prevents conflicts with Meson-generated - # config headers. Since `spin build --clean` will not remove - # these headers, they might overwrite the generated Meson headers, - # causing compatibility issues. Maintaining separate directories - # ensures compatibility between distutils dispatch config headers - # and Meson headers, avoiding build disruptions. - # See gh-24450 for more details. - copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - c_objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_cxx_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cxxflags, - ccompiler=cxx_compiler, - **kws - ) - if copt_c_sources: - log.info("compiling C dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - 
include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cflags, - **kws) - if c_sources: - log.info("compiling C sources") - c_objects += self.compiler.compile( - c_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cflags), - **kws) - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile( - cxx_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cxxflags), - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not 
fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. - if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - if ext.runtime_library_dirs: - # gcc adds RPATH to the link. On windows, copy the dll into - # self.extra_dll_dir instead. - for d in ext.runtime_library_dirs: - for f in glob(d + '/*.dll'): - copy_file(f, self.extra_dll_dir) - ext.runtime_library_dirs = [] - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, 
debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects; - # make sure to iterate over a copy of the list as - # "fake" libraries will be removed as they are - # encountered - for lib in libraries[:]: - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib) as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, lib + '.cobjects') - with open(c_lib) as f: - objects.extend(f.read().splitlines()) - - # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True 
- break - if fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py deleted file mode 100644 index d30dc5bf42d8..000000000000 --- a/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = 
list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py deleted file mode 100644 index d5cadb2745fe..000000000000 --- a/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py deleted file mode 100644 index cfcc80caecd6..000000000000 --- a/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,773 +0,0 @@ -""" Build swig and f2py sources. 
-""" -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN to INFO which will show all " - "compiler output") - ] - - 
boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - 
self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, install_dir, subst_dict = info - template_dir = 
os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' 
- build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - 
log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - 
continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. - """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' 
\ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' 
- depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
% (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' 
\ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search -_has_cpp_header = 
re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search - -def get_swig_target(source): - with open(source) as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source) as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py deleted file mode 100644 index 8bdfb7ec5823..000000000000 --- a/numpy/distutils/command/config.py +++ /dev/null @@ -1,516 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -import os -import signal -import subprocess -import sys -import textwrap -import warnings - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_gcc_version_at_least, - check_inline, - check_restrict, - check_compiler_gcc) - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an OSError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print a helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except OSError as e: - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2015 as of this writing). 
- - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) from e - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. - from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - if self.compiler is None: - raise CompileError('%s compiler is not set' % (lang,)) - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError) as e: - self.compiler = save_compiler - raise CompileError from e - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. 
- self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", 
- [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - #endif - ; - return 0; - }""") % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } 
- """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. - # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. 
- body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. 
- body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc(self): - """Return True if the C compiler is gcc""" - return check_compiler_gcc(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): - """Return True if the GCC version is greater than or equal to the - specified version.""" - return check_gcc_version_at_least(self, major, minor, patchlevel) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout: - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout 
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py deleted file mode 100644 index ca4099886d8c..000000000000 --- a/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,126 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. - """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifying config_fc, config, build_clib, build_ext, 
build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. 
- return diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py deleted file mode 100644 index af24baf2e7e1..000000000000 --- a/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. - -""" -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py deleted file mode 100644 index 14c62b4d1b90..000000000000 --- a/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. 
- self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py deleted file mode 100644 index efa9b4740fc4..000000000000 --- a/numpy/distutils/command/install.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. 
- # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record) as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py deleted file mode 100644 index aa2e5594c3c2..000000000000 --- a/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = 
new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py deleted file mode 100644 index 0a2e68ae192a..000000000000 --- a/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py deleted file mode 100644 index 91eba6f17c29..000000000000 --- a/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... 
- if header[0] == 'numpy._core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py deleted file mode 100644 index e34193883dea..000000000000 --- a/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py deleted file mode 100644 index c4a14e59901f..000000000000 --- a/numpy/distutils/core.py +++ /dev/null @@ -1,215 +0,0 @@ -import sys -from distutils.core import Distribution - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension # noqa: F401 -from numpy.distutils.numpy_distribution 
import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. - from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. 
- """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - 
warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py deleted file mode 100644 index 77620210981d..000000000000 --- a/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/env python3 -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson - -""" -__all__ = ['cpu'] - -import os -import platform -import re -import sys -import types -import warnings - -from subprocess import getstatusoutput - - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase: - """Holds CPU information and provides methods for requiring - the availability of various CPU features. 
- """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return 
re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return 
re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return 
self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class 
DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P

[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def 
_is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! - import winreg - - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)" - r"\s+stepping\s+(?P\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except Exception as e: - print(e, '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and 
self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): 
- return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py deleted file mode 100644 index c701465d9ade..000000000000 --- a/numpy/distutils/exec_command.py +++ /dev/null @@ -1,315 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! 
- Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. - -""" -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. - - The assumption is that the subprocess call already returned bytes in - a suitable encoding. 
- """ - if hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of an executable or None. - - Symbolic links are not followed. 
- """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. 
Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). 
- """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # text is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - except OSError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. - """ - # If there is a quote in the string, assume relevant parts of the - # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py deleted file mode 100644 index 06e6441e65df..000000000000 --- a/numpy/distutils/extension.py +++ /dev/null @@ -1,101 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -import re -from distutils.extension import Extension as old_Extension - - -cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. 
- """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_c_compile_args=None, - extra_cxx_compile_args=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, str): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_c_compile_args = extra_c_compile_args or [] - self.extra_cxx_compile_args = extra_cxx_compile_args or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - return any(cxx_ext_re(str(source)) for source in self.sources) - - def has_f2py_sources(self): - return any(fortran_pyf_ext_re(source) for source in self.sources) - -# class Extension diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 5160e2abf54f..000000000000 --- a/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1035 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. 
- -""" -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -from pathlib import Path - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - - -FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] - - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. 
- - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = 
('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. 
Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. 
- self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. - def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. 
- - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropriate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overridden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = 
set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(self): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. - """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. 
""" - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). 
- """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. - fix = self.command_vars.compiler_fix - # NOTE: this and similar examples are probably just - # excluding --coverage flag when F90 = gfortran --coverage - # instead of putting that flag somewhere more appropriate - # this and similar examples where a Fortran compiler - # environment variable has been customized by CI or a user - # should perhaps eventually be more thoroughly tested and more - # robustly handled - if fix: - fix = _shell_utils.NativeParser.split(fix) - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. 
calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=f77+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=fix+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - if sys.platform.startswith('os400'): - from distutils.sysconfig import get_config_var - python_config = get_config_var('LIBPL') - ld_so_aix = os.path.join(python_config, 'ld_so_aix') - python_exp = os.path.join(python_config, 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - 
for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - 
self.spawn(command, display=display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + 
self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError as e: - msg = str(e) - raise LinkError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. 
- - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', - 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', - 'pathf95', 'nagfor', 'fujitsu')), - ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', - 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound) as e: - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -_has_f_header 
= re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search -_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search -_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - with open(file, encoding='latin1') as f: - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line) or _has_fix_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - return result - -def has_f90_header(src): - with open(src, encoding='latin1') as f: - line = f.readline() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. - """ - flags = {} - with open(src, encoding='latin1') as f: - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index e013def5d1a4..000000000000 --- a/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,158 +0,0 @@ - -# Absoft Corporation ceased operations on 12/31/2022. -# Thus, all links to are invalid. 
- -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler'\ - r'|Absoft Fortran Compiler Version'\ - r'|Copyright Absoft Corporation.*?Version))'\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - 
opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/numpy/distutils/fcompiler/arm.py b/numpy/distutils/fcompiler/arm.py deleted file mode 100644 index 3eb7e9af9c8c..000000000000 --- a/numpy/distutils/fcompiler/arm.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['ArmFlangCompiler'] - -import functools - -class ArmFlangCompiler(FCompiler): - compiler_type = 'arm' - description = 'Arm Compiler' - version_pattern = r'\s*Arm.*version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['armflang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["armflang", "-fPIC"], - 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], - 'compiler_f90': ["armflang", "-fPIC"], - 'linker_so': ["armflang", "-fPIC", "-shared"], - 'archiver': ["ar", "-cr"], - 'ranlib': None - } - - pic_flags = ["-fPIC", "-DPIC"] - c_compiler = 'arm' - module_dir_switch = '-module ' # Don't remove ending space! 
- - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - return '-Wl,-rpath=%s' % dir - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='armflang').get_version()) - diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 01314c136acf..000000000000 --- a/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,120 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], 
- 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError as e: - if '_MSVCCompiler__root' in str(e): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) - else: - raise - except OSError as e: - if not "vcvarsall.bat" in str(e): - print("Unexpected OSError in", __file__) - raise - except ValueError as e: - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', 
'/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index ecd4d9989279..000000000000 --- a/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError( - f"'EnvironmentConfig' object has no attribute '{name}'" - ) from None - - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - 
return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py deleted file mode 100644 index ddce67456d18..000000000000 --- a/numpy/distutils/fcompiler/fujitsu.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -fujitsu - -Supports Fujitsu compiler function. -This compiler is developed by Fujitsu and is used in A64FX on Fugaku. 
-""" -from numpy.distutils.fcompiler import FCompiler - -compilers = ['FujitsuFCompiler'] - -class FujitsuFCompiler(FCompiler): - compiler_type = 'fujitsu' - description = 'Fujitsu Fortran Compiler' - - possible_executables = ['frt'] - version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' - # $ frt --version - # frt (FRT) x.x.x yyyymmdd - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["frt", "-Fixed"], - 'compiler_fix' : ["frt", "-Fixed"], - 'compiler_f90' : ["frt"], - 'linker_so' : ["frt", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-KPIC'] - module_dir_switch = '-M' - module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - def runtime_library_dir_option(self, dir): - return f'-Wl,-rpath={dir}' - def get_libraries(self): - return ['fj90f', 'fj90i', 'fjsrcinfo'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('fujitsu').get_version()) diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e109a972a872..000000000000 --- a/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,42 +0,0 @@ -# http://g95.sourceforge.net/ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 474ee35945b2..000000000000 --- a/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,555 +0,0 @@ -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from distutils.version import LooseVersion - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string =\ - version_string[version_string.find('\n') + 1:].strip() - - # Gfortran versions from after 2010 will output a simple string - # (usually 
"x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith(('0', '2', '3')): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find. 
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let distutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from sysconfig and then - # fall back to setting it to 10.9 This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import sysconfig - target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - if not target: - target = '10.9' - s = f'Env. 
variable MACOSX_DEPLOYMENT_TARGET set to {target}' - warnings.warn(s, stacklevel=2) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." - opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = 
os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform == 'win32' or sys.platform == 'cygwin': - # Linux/Solaris/Unix support RPATH, Windows does not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - if sys.platform == 'darwin': - return f'-Wl,-rpath,{dir}' - elif sys.platform.startswith(('aix', 'os400')): - # AIX RPATH is called LIBPATH - return f'-Wl,-blibpath:{dir}' - else: - return f'-Wl,-rpath={dir}' - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if LooseVersion(v) >= "4": - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"], - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"], - 'linker_so' : ["", "-Wall", "-g"], - 
'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform.startswith(('aix', 'os400')): - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. - c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if 
lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - p = subprocess.Popen( - self.compiler_f77 + ['-v'], - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout, stderr = p.communicate() - output = (stdout or b"") + (stderr or b"") - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. 
- - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception as e: - print(e) diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 09e6483bf5ad..000000000000 --- a/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,41 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - 
version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. - return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index 29927518c703..000000000000 --- a/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import re -import sys -import subprocess - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 
'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - try: - o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) - except (OSError, subprocess.CalledProcessError): - pass - else: - m = re.search(r'xlfcmp:(?P\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = sorted(os.listdir(xlf_dir)) - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - with open(xlf_cfg) as fi: - crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - from numpy.distutils import 
customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 1d6065904110..000000000000 --- a/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,211 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - - def runtime_library_dir_option(self, dir): - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - return '-Wl,-rpath=%s' % dir - - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_free(self): - return ['-FR'] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', - '-assume', 'minus0', '-{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? 
- - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None], - 'compiler_fix' : [None], - 'compiler_f90' : [None], - 'linker_so' : [None], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' # No space after /Fo! - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '/module:' # No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', - '/assume:underscore', '/fpp'] - return opt - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA32", "/QaxSSE3"] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran 
Compiler for 64-bit apps' - - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - - def get_flags_arch(self): - return [] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='intel').get_version()) diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index e925838268b8..000000000000 --- a/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index a0973804571b..000000000000 --- a/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,54 +0,0 @@ -from numpy.distutils.cpuinfo import cpu -from 
numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 939201f44e02..000000000000 --- a/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - 
def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedrts', - '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', '-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - compiler = customized_fcompiler(compiler='nagfor') - print(compiler.get_version()) - 
print(compiler.get_flags_debug()) diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py deleted file mode 100644 index ef411fffc7cb..000000000000 --- a/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,28 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils import customized_fcompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - print(customized_fcompiler(compiler='none').get_version()) diff --git a/numpy/distutils/fcompiler/nv.py b/numpy/distutils/fcompiler/nv.py deleted file mode 100644 index f518c8b0027a..000000000000 --- a/numpy/distutils/fcompiler/nv.py +++ /dev/null @@ -1,53 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NVHPCFCompiler'] - -class NVHPCFCompiler(FCompiler): - """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler - - https://developer.nvidia.com/hpc-sdk - - Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, - https://www.pgroup.com/index.htm. - See also `numpy.distutils.fcompiler.pg`. 
- """ - - compiler_type = 'nv' - description = 'NVIDIA HPC SDK' - version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P[\d.-]+).*' - - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["nvfortran"], - 'compiler_fix': ["nvfortran", "-Mfixed"], - 'compiler_f90': ["nvfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='nv').get_version()) diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 0768cb12e87a..000000000000 --- a/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,33 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 72442c4fec61..000000000000 --- a/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,128 +0,0 @@ -# http://www.pgroup.com -import sys - -from numpy.distutils.fcompiler import FCompiler -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] - - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran", "-dynamiclib"], - 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90': ["pgfortran", "-dynamiclib"], - 'linker_so': ["libtool"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran"], - 'compiler_fix': ["pgfortran", "-Mfixed"], - 'compiler_f90': ["pgfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - - else: - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def 
runtime_library_dir_option(self, dir): - return '-R%s' % dir - - -import functools - -class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['flang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["flang"], - 'compiler_fix': ["flang"], - 'compiler_f90': ["flang"], - 'linker_so': [None], - 'archiver': [ar_exe, "/verbose", "/OUT:"], - 'ranlib': None - } - - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '-module ' # Don't remove ending space! - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - if 'flang' in sys.argv: - print(customized_fcompiler(compiler='flang').get_version()) - else: - print(customized_fcompiler(compiler='pg').get_version()) diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index d039f0b25705..000000000000 --- a/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,51 +0,0 @@ -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = 
['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='sun').get_version()) diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index 92a1647ba437..000000000000 --- a/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,52 +0,0 @@ -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = (r'\s*Pacific-Sierra Research vf90 ' - r'(Personal|Professional)\s+(?P[^\s]*)') - - # VAST f90 does not support -o with -c. 
So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='vast').get_version()) diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py deleted file mode 100644 index 90d1f4c384c7..000000000000 --- a/numpy/distutils/from_template.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python3 -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separated words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '

' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - , a short form of the named, useful when no

appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace(r'\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -def find_and_remove_repl_patterns(astr): - names = find_repl_patterns(astr) - astr = re.subn(named_re, '', astr)[0] - return astr, names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def 
conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace(r'\>', '@rightarrow@') - substr = substr.replace(r'\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." 
% - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) - writestr += cleanedstr - names.update(defs) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) - - -if __name__ == "__main__": - main() diff --git a/numpy/distutils/fujitsuccompiler.py b/numpy/distutils/fujitsuccompiler.py deleted file mode 100644 index c25900b34f1d..000000000000 --- 
a/numpy/distutils/fujitsuccompiler.py +++ /dev/null @@ -1,28 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class FujitsuCCompiler(UnixCCompiler): - - """ - Fujitsu compiler. - """ - - compiler_type = 'fujitsu' - cc_exe = 'fcc' - cxx_exe = 'FCC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables( - compiler=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_so=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -Nclang -fPIC', - linker_exe=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', - linker_so=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' - ) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py deleted file mode 100644 index 77fb39889a29..000000000000 --- a/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,106 +0,0 @@ -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler 
+ ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - cc_exe = 'icc' - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. - """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. 
- """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py deleted file mode 100644 index 851682c63310..000000000000 --- a/numpy/distutils/lib2def.py +++ /dev/null @@ -1,116 +0,0 @@ -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). - -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. 
- -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, text=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. - -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. 
- -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py deleted file mode 100644 index 686e5ebd937f..000000000000 --- a/numpy/distutils/line_endings.py +++ /dev/null @@ -1,77 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -import os -import re -import sys - - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." 
- if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py deleted file mode 100644 index 3347f56d6fe9..000000000000 --- a/numpy/distutils/log.py +++ /dev/null @@ -1,111 +0,0 @@ -# Colored log -import sys -from distutils.log import * # noqa: F403 -from distutils.log import Log as old_Log -from distutils.log import _global_log - -from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. 
- - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
-set_verbosity(0, force=True) - - -_error = error -_warn = warn -_info = info -_debug = debug - - -def error(msg, *a, **kw): - _error(f"ERROR: {msg}", *a, **kw) - - -def warn(msg, *a, **kw): - _warn(f"WARN: {msg}", *a, **kw) - - -def info(msg, *a, **kw): - _info(f"INFO: {msg}", *a, **kw) - - -def debug(msg, *a, **kw): - _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675d8a1f..000000000000 --- a/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index 944ba2d03b33..000000000000 --- a/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,620 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler # noqa: F401 -from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. 
Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.unixccompiler import UnixCCompiler - -try: - from distutils.msvccompiler import get_build_version as get_build_msvc_version -except ImportError: - def get_build_msvc_version(): - return None - -from distutils.errors import UnknownFileError -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' - '-Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - self.set_executables( - compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - 
libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - in system32, - # - otherwise (Sxs), I don't know how to get it. 
- stems = [sys.prefix] - if sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - implementation = sys.implementation.name - if implementation == 'cpython': - dllname = f'python{major_version}{minor_version}.dll' - elif implementation == 'pypy': - dllname = f'libpypy{major_version}.{minor_version}-c.dll' - else: - dllname = f'Unknown platform {implementation}' - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. 
- - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'ARM64' : 'arm64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. - for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. 
- msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - if arch == 'ARM64': - return _build_import_library_arm64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 
'libpython%d.%d.dll.a'] - - # directory trees that may contain the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_arm64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=ARM64): "%s" (from %s)' % - (out_file, dll_file)) - - # 
generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - if not os.path.isfile(lib_file): - # didn't find library file in virtualenv, try base distribution, too, - # and use that instead if found there. for Python 2.7 venvs, the base - # directory is in attribute real_prefix instead of base_prefix. - if hasattr(sys, 'base_prefix'): - base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) - elif hasattr(sys, 'real_prefix'): - base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) - else: - base_lib = '' # os.path.isfile('') == False - - if os.path.isfile(base_lib): - lib_file = base_lib - else: - log.warn('Cannot build import library: "%s" not found', lib_file) - return - log.info('Building import library (ARCH=x86): "%s"', out_file) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_output = lib2def.getnm( - lib2def.DEFAULT_NM + [lib_file], shell=False) - dlist, flist = lib2def.parse_nm(nm_output) - with open(def_file, 'w') as fid: - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) - - dll_name = find_python_dll () - - cmd = ["dlltool", - "--dllname", dll_name, - "--def", def_file, - "--output-lib", out_file] - status = subprocess.check_output(cmd) - if status: - log.warn('Failed to build import library for gcc. 
Linking will fail.') - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. So for versions below, we use a "best guess". -_MSVCRVER_TO_FULLVER = {} -if sys.platform == 'win32': - try: - import msvcrt - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" - _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" - # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 - # on Windows XP: - _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" - crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) - if crt_ver is not None: # Available at least back to Python 3.3 - maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() - _MSVCRVER_TO_FULLVER[maj + min] = crt_ver - del maj, min - del crt_ver - except ImportError: - # If we are here, means python was not built with MSVC. 
Not sure what - # to do in that case: manifest building will fail, but it should not be - # used in that case anyway - log.warn('Cannot import msvcrt: using manifest will not be possible') - -def msvc_manifest_xml(maj, min): - """Given a major and minor version of the MSVCR, returns the - corresponding XML file.""" - try: - fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] - except KeyError: - raise ValueError("Version %d,%d of MSVCRT not supported yet" % - (maj, min)) from None - # Don't be fooled, it looks like an XML, but it is not. In particular, it - # should not have any space before starting, and its size should be - # divisible by 4, most likely for alignment constraints when the xml is - # embedded in the binary... - # This template was copied directly from the python 2.6 binary (using - # strings.exe from mingw on python.exe). - template = textwrap.dedent("""\ - - - - - - - - - - - - - - """) - - return template % {'fullver': fullver, 'maj': maj, 'min': min} - -def manifest_rc(name, type='dll'): - """Return the rc file used to generate the res file which will be embedded - as manifest for given manifest file name, of given type ('dll' or - 'exe'). 
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma_str, mi_str = str(msver).split('.') - # Write the manifest file - manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) - with open(manifest_name(config), "w") as man: - config.temp_files.append(manifest_name(config)) - man.write(manxml) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py deleted file mode 100644 index ca7bcf0fbdd0..000000000000 --- a/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2484 +0,0 @@ -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap -import importlib.util -from threading 
import local as tlocal -from functools import reduce - -import distutils -from distutils.errors import DistutilsError - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs', 'sanitize_cxx_flags', - 'exec_mod_from_location'] - -class InstallableLib: - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. 
- - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - """Quote list of arguments. - - .. deprecated:: 1.22. - """ - import warnings - warnings.warn('"quote_args" is deprecated.', - DeprecationWarning, stacklevel=2) - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." 
- split = name.split('/') - return os.path.join(*split) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). - caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' - -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. 
- """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' 
in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. - """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. 
-# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(bg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path: str) -> str: - """Convert a path from Cygwin-native to Windows-native. 
- - Uses the cygpath utility (part of the Base install) to do the - actual conversion. Falls back to returning the original path if - this fails. - - Handles the default ``/cygdrive`` mount prefix as well as the - ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such - as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or - ``/home/username`` - - Parameters - ---------- - path : str - The path to convert - - Returns - ------- - converted_path : str - The converted path - - Notes - ----- - Documentation for cygpath utility: - https://cygwin.com/cygwin-ug-net/cygpath.html - Documentation for the C function it wraps: - https://cygwin.com/cygwin-api/func-cygwin-conv-path.html - - """ - if sys.platform != "cygwin": - return path - return subprocess.check_output( - ["/usr/bin/cygpath", "--windows", path], text=True - ) - - -def mingw32(): - """Return true when using mingw32 environment. - """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_version(): - "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) - else: - msc_ver = None - return msc_ver - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - ver = msvc_runtime_major () - if ver: - if ver < 140: - return "msvcr%i" % ver - else: - return "vcruntime%i" % ver - else: - return None - -def msvc_runtime_major(): - "Return major version of MSVC runtime coded like get_build_msvc_version" - major = {1300: 70, # MSVC 7.0 - 1310: 71, # MSVC 7.1 - 1400: 80, # MSVC 8 - 1500: 90, # MSVC 9 (aka 2008) - 1600: 100, # MSVC 10 (aka 2010) - 1900: 140, # MSVC 14 (aka 2015) - }.get(msvc_runtime_version(), None) - return major - -######################### - -#XXX need support for .C that is also C++ 
-cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - with open(source) as f: - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - return all(is_string(item) for item in lst) - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except Exception: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' in s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - return any(fortran_ext_match(source) for source in sources) - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - return any(cxx_ext_match(source) for source in sources) - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. 
- """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. 
- """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. 
- filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. - - """ - confvars = distutils.sysconfig.get_config_vars() - so_ext = confvars.get('EXT_SUFFIX', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. 
- if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration: - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. 
- """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except 
NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. - - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s\n' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. - - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case 
setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = exec_mod_from_location( - '_'.join(n.split('.')), setup_py) - if not hasattr(setup_module, 'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. 
- standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths:: - - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - 
target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. 
- Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. ``*``.txt -> parent/a.txt, parent/b.txt - #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt - #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. 
- - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). - """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - 
else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. 
- """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) to each path in the sequence (if needed) and - prepends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. 
If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. - """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - 
libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. 
Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. - - Most C libraries used with ``distutils`` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. 
- - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/_core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy._core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/_core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. 
- - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. 
- - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib, Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self, path): - """Return path's SVN revision number. 
- """ - try: - output = subprocess.check_output(['svnversion'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): - entries = njoin(path, '_svn', 'entries') - else: - entries = njoin(path, '.svn', 'entries') - if os.path.isfile(entries): - with open(entries) as f: - fstr = f.read() - if fstr[:5] == '\d+)"', fstr) - if m: - return int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - return int(m.group('revision')) - return None - - def _get_hg_revision(self, path): - """Return path's Mercurial revision number. - """ - try: - output = subprocess.check_output( - ['hg', 'identify', '--num'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - branch_fn = njoin(path, '.hg', 'branch') - branch_cache_fn = njoin(path, '.hg', 'branch.cache') - - if os.path.isfile(branch_fn): - branch0 = None - with open(branch_fn) as f: - revision0 = f.read().strip() - - branch_map = {} - with open(branch_cache_fn) as f: - for line in f: - branch1, revision1 = line.split()[:2] - if revision1==revision0: - branch0 = branch1 - try: - revision1 = int(revision1) - except ValueError: - continue - branch_map[branch1] = revision1 - - return branch_map.get(branch0) - - return None - - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. - - Return a version string of the current package or None if the version - information could not be detected. - - Notes - ----- - This method scans files named - __version__.py, _version.py, version.py, and - __svn_version__.py for string variables version, __version__, and - _version, until a version number is found. 
- """ - version = getattr(self, 'version', None) - if version is not None: - return version - - # Get version from version file. - if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = exec_mod_from_location( - '_'.join(n.split('.')), fn) - except ImportError as e: - self.warn(str(e)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - - # Try if versioneer module - try: - version = version_module.get_versions()['version'] - except AttributeError: - pass - - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. 
- """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in a Mercurial repository. 
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. 
- """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/_core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/_core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - spec = importlib.util.find_spec('numpy') - d = os.path.join(os.path.dirname(spec.origin), - '_core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. 
- - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/_core/lib'], 'include_dirs': ['.../numpy/_core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - import builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. 
- - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - os.add_dll_directory(extra_dll_dir) - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - """ - Show libraries in the system on which NumPy was built. - - Print information about various resources (libraries, library - directories, include directories, etc.) in the system on which - NumPy was built. - - See Also - -------- - get_include : Returns the directory containing NumPy C - header files. - - Notes - ----- - 1. Classes specifying the information to be printed are defined - in the `numpy.distutils.system_info` module. 
- - Information may include: - - * ``language``: language used to write the libraries (mostly - C or f77) - * ``libraries``: names of libraries found in the system - * ``library_dirs``: directories containing the libraries - * ``include_dirs``: directories containing library header files - * ``src_dirs``: directories containing library source files - * ``define_macros``: preprocessor macros used by - ``distutils.setup`` - * ``baseline``: minimum CPU features required - * ``found``: dispatched features supported in the system - * ``not found``: dispatched features that are not supported - in the system - - 2. NumPy BLAS/LAPACK Installation Notes - - Installing a numpy wheel (``pip install numpy`` or force it - via ``pip install numpy --only-binary :numpy: numpy``) includes - an OpenBLAS implementation of the BLAS and LAPACK linear algebra - APIs. In this case, ``library_dirs`` reports the original build - time configuration as compiled with gcc/gfortran; at run time - the OpenBLAS library is in - ``site-packages/numpy.libs/`` (linux), or - ``site-packages/numpy/.dylibs/`` (macOS), or - ``site-packages/numpy/.libs/`` (windows). - - Installing numpy from source - (``pip install numpy --no-binary numpy``) searches for BLAS and - LAPACK dynamic link libraries at build time as influenced by - environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and - NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; - or the optional file ``~/.numpy-site.cfg``. - NumPy remembers those locations and expects to load the same - libraries at run-time. - In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS - library) is in the default build-time search order after - 'openblas'. 
- - Examples - -------- - >>> import numpy as np - >>> np.show_config() - blas_opt_info: - language = c - define_macros = [('HAVE_CBLAS', None)] - libraries = ['openblas', 'openblas'] - library_dirs = ['/usr/local/lib'] - """ - from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ - ) - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - - features_found, features_not_found = [], [] - for feature in __cpu_dispatch__: - if __cpu_features__[feature]: - features_found.append(feature) - else: - features_not_found.append(feature) - - print("Supported SIMD extensions in this NumPy install:") - print(" baseline = %s" % (','.join(__cpu_baseline__))) - print(" found = %s" % (','.join(features_found))) - print(" not found = %s" % (','.join(features_not_found))) - - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() - - -_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} - - -def sanitize_cxx_flags(cxxflags): - ''' - Some flags are valid for C but not C++. Prune them. 
- ''' - return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] - - -def exec_mod_from_location(modname, modfile): - ''' - Use importlib machinery to import a module `modname` from the file - `modfile`. Depending on the `spec.loader`, the module may not be - registered in sys.modules. - ''' - spec = importlib.util.spec_from_file_location(modname, modfile) - foo = importlib.util.module_from_spec(spec) - spec.loader.exec_module(foo) - return foo diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py deleted file mode 100644 index 68239495d6c7..000000000000 --- a/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. 
- environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py deleted file mode 100644 index 2b93221baac8..000000000000 --- a/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. Give old priority. 
- return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - -def lib_opts_if_msvc(build_cmd): - """ Add flags if we are using MSVC compiler - - We can't see `build_cmd` in our scope, because we have not initialized - the distutils build command, so use this deferred calculation to run - when we are building the library. - """ - if build_cmd.compiler.compiler_type != 'msvc': - return [] - # Explicitly disable whole-program optimization. 
- flags = ['/GL-'] - # Disable voltbl section for vc142 to allow link using mingw-w64; see: - # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 - if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): - flags.append('-d2VolatileMetadata-') - return flags diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 14e8791b14cd..000000000000 --- a/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,441 +0,0 @@ -import sys -import re -import os - -from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(OSError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(OSError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. 
- These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo: - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. 
- - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name, 'Description: %s' % self.description] - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet: - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. - - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. 
- - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of 
required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. 
- - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print(npymath_info) - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config( - pkg_name, ['numpy/_core/lib/npy-pkg-config', '.', d] - ) - 
else: - info = read_config( - pkg_name, ['numpy/_core/lib/npy-pkg-config', '.'] - ) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search(r'([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print(info.cflags(section)) - if options.libs: - print(info.libs(section)) - if options.version: - print(info.version) - if options.min_version: - print(info.version >= options.min_version) diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py deleted file mode 100644 index ea8182659cb1..000000000000 --- a/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,17 +0,0 @@ -# XXX: Handle setuptools ? -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py deleted file mode 100644 index 1f879edf4d21..000000000000 --- a/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,21 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with a gcc built Python. 
- """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py deleted file mode 100644 index e428b47f08d4..000000000000 --- a/numpy/distutils/system_info.py +++ /dev/null @@ -1,3267 +0,0 @@ -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) 
- -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL is not intended for general use. - -Appropriate defaults are used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. DEFAULT section in site.cfg - 4. System default search paths (see ``default_*`` variables below). -Only the first complete match is returned. - -Currently, the following classes are available, along with their section names: - - Numeric_info:Numeric - _numpy_info:Numeric - _pkg_config_info:None - accelerate_info:accelerate - accelerate_lapack_info:accelerate - agg2_info:agg2 - amd_info:amd - atlas_3_10_blas_info:atlas - atlas_3_10_blas_threads_info:atlas - atlas_3_10_info:atlas - atlas_3_10_threads_info:atlas - atlas_blas_info:atlas - atlas_blas_threads_info:atlas - atlas_info:atlas - atlas_threads_info:atlas - blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) - blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) - blas_info:blas - blas_mkl_info:mkl - blas_ssl2_info:ssl2 - blas_opt_info:ALL # usage recommended - blas_src_info:blas_src - blis_info:blis - boost_python_info:boost_python - dfftw_info:fftw - dfftw_threads_info:fftw - djbfft_info:djbfft - f2py_info:ALL - fft_opt_info:ALL - fftw2_info:fftw - fftw3_info:fftw3 - fftw_info:fftw - fftw_threads_info:fftw - flame_info:flame - freetype2_info:freetype2 - gdk_2_info:gdk_2 - gdk_info:gdk - gdk_pixbuf_2_info:gdk_pixbuf_2 - gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 - gdk_x11_2_info:gdk_x11_2 - gtkp_2_info:gtkp_2 - gtkp_x11_2_info:gtkp_x11_2 - lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - lapack_atlas_3_10_info:atlas - lapack_atlas_3_10_threads_info:atlas - lapack_atlas_info:atlas - 
lapack_atlas_threads_info:atlas - lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) - lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) - lapack_info:lapack - lapack_mkl_info:mkl - lapack_ssl2_info:ssl2 - lapack_opt_info:ALL # usage recommended - lapack_src_info:lapack_src - mkl_info:mkl - ssl2_info:ssl2 - numarray_info:numarray - numerix_info:numerix - numpy_info:numpy - openblas64__info:openblas64_ - openblas64__lapack_info:openblas64_ - openblas_clapack_info:openblas - openblas_ilp64_info:openblas_ilp64 - openblas_ilp64_lapack_info:openblas_ilp64 - openblas_info:openblas - openblas_lapack_info:openblas - sfftw_info:fftw - sfftw_threads_info:fftw - system_info:ALL - umfpack_info:umfpack - wx_info:wx - x11_info:x11 - xft_info:xft - -Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER -and NPY_LAPACK_ORDER environment variables to determine the order in which -specific BLAS and LAPACK libraries are searched for. - -This search (or autodetection) can be bypassed by defining the environment -variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the -exact linker flags to use (language will be set to F77). Building against -Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK -implementations at runtime. If using this to build NumPy itself, it is -recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a -CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized -otherwise). 
- -Example: ----------- -[DEFAULT] -# default section -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -libraries = rfftw, fftw - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -libraries = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Note that the ``libraries`` key is the default setting for libraries. - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -""" -import sys -import os -import re -import copy -import warnings -import subprocess -import textwrap - -from glob import glob -from functools import reduce -from configparser import NoOptionError -from configparser import RawConfigParser as ConfigParser -# It seems that some people are importing ConfigParser from here so is -# good to keep its class name. Use of RawConfigParser is needed in -# order to be able to load path names with percent in them, like -# `feature%2Fcool` which is common on git flow branch names. 
- -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import sysconfig -from numpy.distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import ( - find_executable, filepath_from_subprocess_output, - ) -from numpy.distutils.misc_util import (is_sequence, is_string, - get_shared_lib_extension) -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils import customized_ccompiler as _customized_ccompiler -from numpy.distutils import _shell_utils -import distutils.ccompiler -import tempfile -import shutil - -__all__ = ['system_info'] - -# Determine number of bits -import platform -_bits = {'32bit': 32, '64bit': 64} -platform_bits = _bits[platform.architecture()[0]] - - -global_compiler = None - -def customized_ccompiler(): - global global_compiler - if not global_compiler: - global_compiler = _customized_ccompiler() - return global_compiler - - -def _c_string_literal(s): - """ - Convert a python string into a literal suitable for inclusion into C code - """ - # only these three characters are forbidden in C strings - s = s.replace('\\', r'\\') - s = s.replace('"', r'\"') - s = s.replace('\n', r'\n') - return '"{}"'.format(s) - - -def libpaths(paths, bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. 
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(sysconfig.get_config_var('exec_prefix'), - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. 
- vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture()[0] == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) - -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_runtime_dirs = [] - default_include_dirs = ['/usr/local/include', - '/opt/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - with open(os.devnull, 'w') as tmp: - try: - p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old setuptools bug is triggered (see gh-3160). 
- pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def _parse_env_order(base_order, env): - """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` - - This method will sequence the environment variable and check for their - individual elements in `base_order`. 
- - The items in the environment variable may be negated via '^item' or '!itema,itemb'. - It must start with ^/! to negate all options. - - Raises - ------ - ValueError: for mixed negated and non-negated orders or multiple negated orders - - Parameters - ---------- - base_order : list of str - the base list of orders - env : str - the environment variable to be parsed, if none is found, `base_order` is returned - - Returns - ------- - allow_order : list of str - allowed orders in lower-case - unknown_order : list of str - for values not overlapping with `base_order` - """ - order_str = os.environ.get(env, None) - - # ensure all base-orders are lower-case (for easier comparison) - base_order = [order.lower() for order in base_order] - if order_str is None: - return base_order, [] - - neg = order_str.startswith(('^', '!')) - # Check format - order_str_l = list(order_str) - sum_neg = order_str_l.count('^') + order_str_l.count('!') - if neg: - if sum_neg > 1: - raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}") - # remove prefix - order_str = order_str[1:] - elif sum_neg > 0: - raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}") - - # Split and lower case - orders = order_str.lower().split(',') - - # to inform callee about non-overlapping elements - unknown_order = [] - - # if negated, we have to remove from the order - if neg: - allow_order = base_order.copy() - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order in allow_order: - allow_order.remove(order) - - else: - allow_order = [] - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order not in allow_order: - allow_order.append(order) - - return allow_order, unknown_order - - -def get_info(name, notfound_action=0): - """ - notfound_action: - 0 - 
do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'armpl': armpl_info, - 'blas_armpl': blas_armpl_info, - 'lapack_armpl': lapack_armpl_info, - 'fftw3_armpl': fftw3_armpl_info, - 'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead - 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto - 'atlas_3_10_blas': atlas_3_10_blas_info, - 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, - 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead - 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto - 'flame': flame_info, # use lapack_opt instead - 'mkl': mkl_info, - 'ssl2': ssl2_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'openblas_clapack': openblas_clapack_info, # use blas_opt instead - 'blis': blis_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'lapack_ssl2': lapack_ssl2_info, - 'blas_ssl2': blas_ssl2_info, - 'accelerate': accelerate_info, # use blas_opt instead - 'accelerate_lapack': accelerate_lapack_info, - 'openblas64_': openblas64__info, - 'openblas64__lapack': openblas64__lapack_info, - 'openblas_ilp64': openblas_ilp64_info, - 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': dfftw_threads_info, - 
'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'lapack_ilp64_opt': lapack_ilp64_opt_info, - 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, - 'lapack64__opt': lapack64__opt_info, - 'blas_opt': blas_opt_info, - 'blas_ilp64_opt': blas_ilp64_opt_info, - 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, - 'blas64__opt': blas64__opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AliasedOptionError(DistutilsError): - """ - Aliases entries in config files should not be existing. - In section '{section}' we found multiple appearances of options {options}.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. 
- Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class FlameNotFoundError(NotFoundError): - """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [flame]).""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class LapackILP64NotFoundError(NotFoundError): - """ - 64-bit Lapack libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasOptNotFoundError(NotFoundError): - """ - Optimized (vendor) Blas libraries are not found. - Falls back to netlib Blas library which has worse performance. - A better performance should be easily gained by switching - Blas library.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasILP64NotFoundError(NotFoundError): - """ - 64-bit Blas libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. 
- Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (https://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info: - - """ get_info() is the only public method. Don't use others. - """ - dir_env_var = None - # XXX: search_static_first is disabled by default, may disappear in - # future unless it is proved to be useful. - search_static_first = 0 - # The base-class section name is a random word "ALL" and is not really - # intended for general use. It cannot be None nor can it be DEFAULT as - # these break the ConfigParser. 
See gh-15338 - section = 'ALL' - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. 
Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs - # the libraries are insisting on the user having defined - # them using the library_dirs, and not necessarily by - # runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` are found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options are found - """ - found = [self.cp.has_option(self.section, opt) for opt in options] - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyways - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the information in the current information with - respect to these flags: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = 
_shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' 
% d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). 
- """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. - """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. 
- """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. - """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - 
dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class fftw3_armpl_info(fftw_info): - section = 'fftw3' - dir_env_var = 'ARMPL_DIR' - notfounderror = FFTWNotFoundError - ver_info = [{'name': 'fftw3', - 'libs': ['armpl_lp64_mp'], - 'includes': ['fftw3.h'], - 'macros': [('SCIPY_FFTW3_H', None)]}] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf) as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += 
glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class ssl2_info(system_info): - section = 'ssl2' - dir_env_var = 'SSL2_DIR' - # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
- _lib_ssl2 = ['fjlapackexsve'] - # Single-threaded version - #_lib_ssl2 = ['fjlapacksve'] - - def get_tcsds_rootdir(self): - tcsdsroot = os.environ.get('TCSDS_PATH', None) - if tcsdsroot is not None: - return tcsdsroot - return None - - def __init__(self): - tcsdsroot = self.get_tcsds_rootdir() - if tcsdsroot is None: - system_info.__init__(self) - else: - system_info.__init__( - self, - default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], - default_include_dirs=[os.path.join(tcsdsroot, - 'clang-comp/include')]) - - def calc_info(self): - tcsdsroot = self.get_tcsds_rootdir() - - lib_dirs = self.get_lib_dirs() - if lib_dirs is None: - lib_dirs = os.path.join(tcsdsroot, 'lib64') - - incl_dirs = self.get_include_dirs() - if incl_dirs is None: - incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') - - ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) - - info = self.check_libs2(lib_dirs, ssl2_libs) - if info is None: - return - dict_append(info, - define_macros=[('HAVE_CBLAS', None), - ('HAVE_SSL2', 1)], - include_dirs=incl_dirs,) - self.set_info(**info) - - -class lapack_ssl2_info(ssl2_info): - pass - - -class blas_ssl2_info(ssl2_info): - pass - - - -class armpl_info(system_info): - section = 'armpl' - dir_env_var = 'ARMPL_DIR' - _lib_armpl = ['armpl_lp64_mp'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) - info = self.check_libs2(lib_dirs, armpl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - -class lapack_armpl_info(armpl_info): - pass - -class blas_armpl_info(armpl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - 
_lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. 
- lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. - ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class 
atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = 
self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - # LAPACK_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. 
- allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r 
orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 
'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
- ***************************************************** - """), stacklevel=2) - dict_append(info, language='f90', - define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_', o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION', None) - if atlas_version: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - else: - dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)', o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - elif atlas_version == '3.2.1_pre3.3.6': - dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) - else: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - -class lapack_opt_info(system_info): - notfounderror = LapackNotFoundError - - # List of all known LAPACK libraries, in the default order - lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame', - 'accelerate', 'atlas', 'lapack'] - order_env_var_name = 'NPY_LAPACK_ORDER' - - def _calc_info_armpl(self): - info = get_info('lapack_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('lapack_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('lapack_ssl2') - if info: - 
self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas_lapack') - if info: - self.set_info(**info) - return True - info = get_info('openblas_clapack') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_flame(self): - info = get_info('flame') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_threads') - if not info: - info = get_info('atlas_3_10') - if not info: - info = get_info('atlas_threads') - if not info: - info = get_info('atlas') - if info: - # Figure out if ATLAS has lapack... - # If not we need the lapack library, but not BLAS! - l = info.get('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ - or ('ATLAS_WITHOUT_LAPACK', None) in l: - # Get LAPACK (with possible warnings) - # If not found we don't accept anything - # since we can't use ATLAS with LAPACK! - lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - 
- def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(unknown_order)) - - if 'NPY_LAPACK_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
- warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class _ilp64_opt_info_mixin: - symbol_suffix = None - symbol_prefix = None - - def _check_info(self, info): - macros = dict(info.get('define_macros', [])) - prefix = macros.get('BLAS_SYMBOL_PREFIX', '') - suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') - - if self.symbol_prefix not in (None, prefix): - return False - - if self.symbol_suffix not in (None, suffix): - return False - - return bool(info) - - -class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): - notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' - - def _calc_info(self, name): - print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) - info = get_info(name + '_lapack') - if self._check_info(info): - self.set_info(**info) - return True - else: - print('%s_lapack does not exist' % (name)) - return False - - -class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): - # Same as lapack_ilp64_opt_info, but fix symbol names - symbol_prefix = '' - symbol_suffix = '' - - -class lapack64__opt_info(lapack_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_opt_info(system_info): - notfounderror = BlasNotFoundError - # List of all known BLAS libraries, in the default order - - blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', - 'accelerate', 'atlas', 'blas'] - order_env_var_name = 'NPY_BLAS_ORDER' - - def _calc_info_armpl(self): - info = get_info('blas_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('blas_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('blas_ssl2') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blis(self): - info = get_info('blis') - 
if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_blas_threads') - if not info: - info = get_info('atlas_3_10_blas') - if not info: - info = get_info('atlas_blas_threads') - if not info: - info = get_info('atlas_blas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blas(self): - # Warn about a non-optimized BLAS library - warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) - info = {} - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - blas = get_info('blas') - if blas: - dict_append(info, **blas) - else: - # Not even BLAS was found! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() - if 'NPY_CBLAS_LIBS' in os.environ: - info['define_macros'].append(('HAVE_CBLAS', None)) - info['extra_link_args'].extend( - os.environ['NPY_CBLAS_LIBS'].split()) - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) - - 
if 'NPY_BLAS_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class cblas_info(system_info): - section = 'cblas' - dir_env_var = 'CBLAS' - # No default as it's used only in blas_info - _lib_names = [] - notfounderror = BlasNotFoundError - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. 
- info['language'] = 'f77' # XXX: is it generally true? - # If cblas is given as an option, use those - cblas_info_obj = cblas_info() - cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries') - cblas_libs = cblas_info_obj.get_libs(cblas_opt, None) - if cblas_libs: - info['libraries'] = cblas_libs + blas_libs - info['define_macros'] = [('HAVE_CBLAS', None)] - else: - lib = self.get_cblas_libs(info) - if lib is not None: - info['language'] = 'c' - info['libraries'] = lib - info['define_macros'] = [('HAVE_CBLAS', None)] - self.set_info(**info) - - def get_cblas_libs(self, info): - """ Check whether we can link with CBLAS interface - - This method will search through several combinations of libraries - to check whether CBLAS is present: - - 1. Libraries in ``info['libraries']``, as is - 2. As 1. but also explicitly adding ``'cblas'`` as a library - 3. As 1. but also explicitly adding ``'blas'`` as a library - 4. Check only library ``'cblas'`` - 5. Check only library ``'blas'`` - - Parameters - ---------- - info : dict - system information dictionary for compilation and linking - - Returns - ------- - libraries : list of str or None - a list of libraries that enables the use of CBLAS interface. - Returns None if not found or a compilation error occurs. - - Since 1.17 returns a list. 
- """ - # primitive cblas check by looking for the header and trying to link - # cblas or blas - c = customized_ccompiler() - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - #include - int main(int argc, const char *argv[]) - { - double a[4] = {1,2,3,4}; - double b[4] = {5,6,7,8}; - return cblas_ddot(4, a, 1, b, 1) > 10; - }""") - src = os.path.join(tmpdir, 'source.c') - try: - with open(src, 'w') as f: - f.write(s) - - try: - # check we can compile (find headers) - obj = c.compile([src], output_dir=tmpdir, - include_dirs=self.get_include_dirs()) - except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): - return None - - # check we can link (find library) - # some systems have separate cblas and blas libs. - for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import 
new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': [tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void 
%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 
'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... - - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. 
- """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - # Add the extra flag args to info - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - 
_lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - macros = [ - ('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None), - ('ACCELERATE_NEW_LAPACK', None), - ] - if(os.getenv('NPY_USE_BLAS_ILP64', None)): - print('Setting HAVE_BLAS_ILP64') - macros += [ - ('HAVE_BLAS_ILP64', None), - ('ACCELERATE_LAPACK_ILP64', None), - ] - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=macros) - - return - -class accelerate_lapack_info(accelerate_info): - def _calc_info(self): - return super()._calc_info() - -class blas_src_info(system_info): - # BLAS_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. 
- section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? 
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(sysconfig.get_path('include')) - except ImportError: - pass - py_incl_dir = sysconfig.get_path('include') - include_dirs.append(py_incl_dir) - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except 
ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError as e: - msg1 = str(e) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError as e: - msg2 = str(e) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError as e: - msg3 = str(e) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." 
% (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [sysconfig.get_path('include')] - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if 
sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' 
\ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class 
gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): 
- section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. - """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - 
return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # we don't need the result, but we want - # the side effect of printing diagnostics - conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy/distutils/tests/__init__.py b/numpy/distutils/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py deleted file mode 100644 index 7124cc407a2f..000000000000 --- a/numpy/distutils/tests/test_build_ext.py +++ /dev/null @@ -1,74 +0,0 @@ -'''Tests for numpy.distutils.build_ext.''' - -import os -import subprocess -import sys -from textwrap import indent, dedent -import pytest -from numpy.testing import IS_WASM - -@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") -@pytest.mark.slow -def test_multi_fortran_libs_link(tmp_path): - ''' - Ensures multiple "fake" static libraries are correctly linked. 
- see gh-18295 - ''' - - # We need to make sure we actually have an f77 compiler. - # This is nontrivial, so we'll borrow the utilities - # from f2py tests: - from numpy.distutils.tests.utilities import has_f77_compiler - if not has_f77_compiler(): - pytest.skip('No F77 compiler found') - - # make some dummy sources - with open(tmp_path / '_dummy1.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_one() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy2.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_two() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy.c', 'w') as fid: - # doesn't need to load - just needs to exist - fid.write('int PyInit_dummyext;') - - # make a setup file - with open(tmp_path / 'setup.py', 'w') as fid: - srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') - fid.write(dedent(f'''\ - def configuration(parent_package="", top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration("", parent_package, top_path) - config.add_library("dummy1", sources=["_dummy1.f"]) - config.add_library("dummy2", sources=["_dummy2.f"]) - config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) - return config - - - if __name__ == "__main__": - import sys - sys.path.insert(0, r"{srctree}") - from numpy.distutils.core import setup - setup(**configuration(top_path="").todict())''')) - - # build the test extension and "install" into a temporary directory - build_dir = tmp_path - subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', - '--prefix', str(tmp_path / 'installdir'), - '--record', str(tmp_path / 'tmp_install_log.txt'), - ], - cwd=str(build_dir), - ) - # get the path to the so - so = None - with open(tmp_path /'tmp_install_log.txt') as fid: - for line in fid: - if 'dummyext' in line: - so = line.strip() - break - assert so is not None diff --git 
a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py deleted file mode 100644 index 3714aea0e12e..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ /dev/null @@ -1,808 +0,0 @@ -import re, textwrap, os -from os import sys, path -from distutils.errors import DistutilsError - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - import unittest, contextlib, tempfile, shutil - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt - - # from numpy/testing/_private/utils.py - @contextlib.contextmanager - def tempdir(*args, **kwargs): - tmpdir = tempfile.mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - def assert_(expr, msg=''): - if not expr: - raise AssertionError(msg) -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - from numpy.testing import assert_, tempdir - -# architectures and compilers to test -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang", "fcc"), - s390x = ("gcc", "clang"), - noarch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" - def __init__(self, trap_files="", trap_flags="", *args, **kwargs): - self.fake_trap_files = trap_files - self.fake_trap_flags = trap_flags - CCompilerOpt.__init__(self, None, **kwargs) - - def __repr__(self): - return textwrap.dedent("""\ - <<<< - march : {} - compiler : {} - ---------------- - {} - >>>> - """).format(self.cc_march, self.cc_name, self.report()) - - def dist_compile(self, sources, flags, **kwargs): - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - if self.fake_trap_files: - for src in sources: - if re.match(self.fake_trap_files, src): - self.dist_error("source is trapped by a fake interface") 
- if self.fake_trap_flags: - for f in flags: - if re.match(self.fake_trap_flags, f): - self.dist_error("flag is trapped by a fake interface") - # fake objects - return zip(sources, [' '.join(flags)] * len(sources)) - - def dist_info(self): - return FakeCCompilerOpt.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _Test_CCompilerOpt: - arch = None # x86_64 - cc = None # gcc - - def setup_class(self): - FakeCCompilerOpt.conf_nocache = True - self._opt = None - - def nopt(self, *args, **kwargs): - FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") - return FakeCCompilerOpt(*args, **kwargs) - - def opt(self): - if not self._opt: - self._opt = self.nopt() - return self._opt - - def march(self): - return self.opt().cc_march - - def cc_name(self): - return self.opt().cc_name - - def get_targets(self, targets, groups, **kwargs): - FakeCCompilerOpt.conf_target_groups = groups - opt = self.nopt( - cpu_baseline=kwargs.get("baseline", "min"), - cpu_dispatch=kwargs.get("dispatch", "max"), - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - with tempdir() as tmpdir: - file = os.path.join(tmpdir, "test_targets.c") - with open(file, 'w') as f: - f.write(targets) - gtargets = [] - gflags = {} - fake_objects = opt.try_dispatch([file]) - for source, flags in fake_objects: - gtar = path.basename(source).split('.')[1:-1] - glen = len(gtar) - if glen == 0: - gtar = "baseline" - elif glen == 1: - gtar = gtar[0].upper() - else: - # converting multi-target into parentheses str format to be equivalent - # to the configuration statements syntax. 
- gtar = ('('+' '.join(gtar)+')').upper() - gtargets.append(gtar) - gflags[gtar] = flags - - has_baseline, targets = opt.sources_status[file] - targets = targets + ["baseline"] if has_baseline else targets - # convert tuple that represent multi-target into parentheses str format - targets = [ - '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar - for tar in targets - ] - if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): - raise AssertionError( - "'sources_status' returns different targets than the compiled targets\n" - "%s != %s" % (targets, gtargets) - ) - # return targets from 'sources_status' since the order is matters - return targets, gflags - - def arg_regex(self, **kwargs): - map2origin = dict( - x64 = "x86", - ppc64le = "ppc64", - aarch64 = "armhf", - clang = "gcc", - ) - march = self.march(); cc_name = self.cc_name() - map_march = map2origin.get(march, march) - map_cc = map2origin.get(cc_name, cc_name) - for key in ( - march, cc_name, map_march, map_cc, - march + '_' + cc_name, - map_march + '_' + cc_name, - march + '_' + map_cc, - map_march + '_' + map_cc, - ) : - regex = kwargs.pop(key, None) - if regex is not None: - break - if regex: - if isinstance(regex, dict): - for k, v in regex.items(): - if v[-1:] not in ')}$?\\.+*': - regex[k] = v + '$' - else: - assert(isinstance(regex, str)) - if regex[-1:] not in ')}$?\\.+*': - regex += '$' - return regex - - def expect(self, dispatch, baseline="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_dispatch_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'dispatch features "%s" not match "%s"' % (features, 
match) - ) - - def expect_baseline(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_baseline_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'baseline features "%s" not match "%s"' % (features, match) - ) - - def expect_flags(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - flags = ' '.join(opt.cpu_baseline_flags()) - if not match: - if len(flags) != 0: - raise AssertionError( - 'expected empty flags not "%s"' % flags - ) - return - if not re.match(match, flags): - raise AssertionError( - 'flags "%s" not match "%s"' % (flags, match) - ) - - def expect_targets(self, targets, groups={}, **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) - targets = ' '.join(targets) - if not match: - if len(targets) != 0: - raise AssertionError( - 'expected empty targets, not "%s"' % targets - ) - return - if not re.match(match, targets, re.IGNORECASE): - raise AssertionError( - 'targets "%s" not match "%s"' % (targets, match) - ) - - def expect_target_flags(self, targets, groups={}, **kwargs): - match_dict = self.arg_regex(**kwargs) - if match_dict is None: - return - assert(isinstance(match_dict, dict)) - _, tar_flags = self.get_targets(targets=targets, groups=groups) - - for match_tar, match_flags in match_dict.items(): - if match_tar not in tar_flags: - raise AssertionError( - 
'expected to find target "%s"' % match_tar - ) - flags = tar_flags[match_tar] - if not match_flags: - if len(flags) != 0: - raise AssertionError( - 'expected to find empty flags in target "%s"' % match_tar - ) - if not re.match(match_flags, flags): - raise AssertionError( - '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags) - ) - - def test_interface(self): - wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" - wrong_cc = "clang" if self.cc != "clang" else "icc" - opt = self.opt() - assert_(getattr(opt, "cc_on_" + self.arch)) - assert_(not getattr(opt, "cc_on_" + wrong_arch)) - assert_(getattr(opt, "cc_is_" + self.cc)) - assert_(not getattr(opt, "cc_is_" + wrong_cc)) - - def test_args_empty(self): - for baseline, dispatch in ( - ("", "none"), - (None, ""), - ("none +none", "none - none"), - ("none -max", "min - max"), - ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), - ("max -vsx - avx + avx512f neon -MAX ", - "min -min + max -max -vsx + avx2 -avx2 +NONE") - ) : - opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - assert(len(opt.cpu_baseline_names()) == 0) - assert(len(opt.cpu_dispatch_names()) == 0) - - def test_args_validation(self): - if self.march() == "unknown": - return - # check sanity of argument's validation - for baseline, dispatch in ( - ("unkown_feature - max +min", "unknown max min"), # unknowing features - ("#avx2", "$vsx") # groups and polices aren't acceptable - ) : - try: - self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - raise AssertionError("excepted an exception for invalid arguments") - except DistutilsError: - pass - - def test_skip(self): - # only takes what platform supports and skip the others - # without casing exceptions - self.expect( - "sse vsx neon", - x86="sse", ppc64="vsx", armhf="neon", unknown="" - ) - self.expect( - "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", - x86 = "sse41 avx avx2", - ppc64 = "vsx2 vsx3", - armhf = "neon_vfpv4 asimd", - unknown = "" - ) - # any features in cpu_dispatch must 
be ignored if it's part of baseline - self.expect( - "sse neon vsx", baseline="sse neon vsx", - x86="", ppc64="", armhf="" - ) - self.expect( - "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", - x86="", ppc64="", armhf="" - ) - - def test_implies(self): - # baseline combining implied features, so we count - # on it instead of testing 'feature_implies()'' directly - self.expect_baseline( - "fma3 avx2 asimd vsx3", - # .* between two spaces can validate features in between - x86 = "sse .* sse41 .* fma3.*avx2", - ppc64 = "vsx vsx2 vsx3", - armhf = "neon neon_fp16 neon_vfpv4 asimd" - ) - """ - special cases - """ - # in icc and msvc, FMA3 and AVX2 can't be separated - # both need to implies each other, same for avx512f & cd - for f0, f1 in ( - ("fma3", "avx2"), - ("avx512f", "avx512cd"), - ): - diff = ".* sse42 .* %s .*%s$" % (f0, f1) - self.expect_baseline(f0, - x86_gcc=".* sse42 .* %s$" % f0, - x86_icc=diff, x86_iccw=diff - ) - self.expect_baseline(f1, - x86_gcc=".* avx .* %s$" % f1, - x86_icc=diff, x86_iccw=diff - ) - # in msvc, following features can't be separated too - for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): - for ff in f: - self.expect_baseline(ff, - x86_msvc=".*%s" % ' '.join(f) - ) - - # in ppc64le VSX and VSX2 can't be separated - self.expect_baseline("vsx", ppc64le="vsx vsx2") - # in aarch64 following features can't be separated - for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): - self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") - - def test_args_options(self): - # max & native - for o in ("max", "native"): - if o == "native" and self.cc_name() == "msvc": - continue - self.expect(o, - trap_files=".*cpu_(sse|vsx|neon|vx).c", - x86="", ppc64="", armhf="", s390x="" - ) - self.expect(o, - trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", - x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", - aarch64="", ppc64le="", s390x="vx" - ) - self.expect(o, - trap_files=".*cpu_(popcnt|vsx3).c", - x86="sse .* sse41", 
ppc64="vsx vsx2", - armhf="neon neon_fp16 .* asimd .*", - s390x="vx vxe vxe2" - ) - self.expect(o, - x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in icc, xop and fam4 aren't supported - x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in msvc, avx512_knl avx512_knm aren't supported - x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", - armhf=".* asimd asimdhp asimddp .*", - ppc64="vsx vsx2 vsx3 vsx4.*", - s390x="vx vxe vxe2.*" - ) - # min - self.expect("min", - x86="sse sse2", x64="sse sse2 sse3", - armhf="", aarch64="neon neon_fp16 .* asimd", - ppc64="", ppc64le="vsx vsx2", s390x="" - ) - self.expect( - "min", trap_files=".*cpu_(sse2|vsx2).c", - x86="", ppc64le="" - ) - # an exception must triggered if native flag isn't supported - # when option "native" is activated through the args - try: - self.expect("native", - trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", - x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_flags(self): - self.expect_flags( - "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", - x86_gcc="-msse -msse2", x86_icc="-msse -msse2", - x86_iccw="/arch:SSE2", - x86_msvc="/arch:SSE2" if self.march() == "x86" else "", - ppc64_gcc= "-mcpu=power8", - ppc64_clang="-mcpu=power8", - armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", - aarch64="", - s390x="-mzvector -march=arch12" - ) - # testing normalize -march - self.expect_flags( - "asimd", - aarch64="", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" - ) - self.expect_flags( - "asimdhp", - aarch64_gcc=r"-march=armv8.2-a\+fp16", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" - ) - self.expect_flags( 
- "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" - ) - self.expect_flags( - # asimdfhm implies asimdhp - "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" - ) - self.expect_flags( - "asimddp asimdhp asimdfhm", - aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" - ) - self.expect_flags( - "vx vxe vxe2", - s390x=r"-mzvector -march=arch13" - ) - - def test_targets_exceptions(self): - for targets in ( - "bla bla", "/*@targets", - "/*@targets */", - "/*@targets unknown */", - "/*@targets $unknown_policy avx2 */", - "/*@targets #unknown_group avx2 */", - "/*@targets $ */", - "/*@targets # vsx */", - "/*@targets #$ vsx */", - "/*@targets vsx avx2 ) */", - "/*@targets vsx avx2 (avx2 */", - "/*@targets vsx avx2 () */", - "/*@targets vsx avx2 ($autovec) */", # no features - "/*@targets vsx avx2 (xxx) */", - "/*@targets vsx avx2 (baseline) */", - ) : - try: - self.expect_targets( - targets, - x86="", armhf="", ppc64="", s390x="" - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_targets_syntax(self): - for targets in ( - "/*@targets $keep_baseline sse vsx neon vx*/", - "/*@targets,$keep_baseline,sse,vsx,neon vx*/", - "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", - """ - /* - ** @targets - ** $keep_baseline, sse vsx,neon, vx - */ - """, - """ - /* - ************@targets**************** - ** $keep_baseline, sse vsx, neon, vx - ************************************ - */ - """, - """ - /* - /////////////@targets///////////////// - //$keep_baseline//sse//vsx//neon//vx - ///////////////////////////////////// - */ - """, - """ - /* - @targets - $keep_baseline - SSE VSX NEON VX*/ - """ - ) : - self.expect_targets(targets, - x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" - ) - - def test_targets(self): - # test skipping baseline features - self.expect_targets( - """ - 
/*@targets - sse sse2 sse41 avx avx2 avx512f - vsx vsx2 vsx3 vsx4 - neon neon_fp16 asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="avx vsx2 asimd vx vxe", - x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", - s390x="vxe2" - ) - # test skipping non-dispatch features - self.expect_targets( - """ - /*@targets - sse41 avx avx2 avx512f - vsx2 vsx3 vsx4 - asimd asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", - x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" - ) - # test skipping features that not supported - self.expect_targets( - """ - /*@targets - sse2 sse41 avx2 avx512f - vsx2 vsx3 vsx4 - neon asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", - trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", - x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", - s390x="vxe vx" - ) - # test skipping features that implies each other - self.expect_targets( - """ - /*@targets - sse sse2 avx fma3 avx2 avx512f avx512cd - vsx vsx2 vsx3 - neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp - asimddp asimdfhm - */ - """, - baseline="", - x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", - x86_msvc="avx512cd avx2 avx sse2", - x86_icc="avx512cd avx2 avx sse2", - x86_iccw="avx512cd avx2 avx sse2", - ppc64="vsx3 vsx2 vsx", - ppc64le="vsx3 vsx2", - armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", - aarch64="asimdfhm asimddp asimdhp asimd" - ) - - def test_targets_policies(self): - # 'keep_baseline', generate objects for baseline features - self.expect_targets( - """ - /*@targets - $keep_baseline - sse2 sse42 avx2 avx512f - vsx2 vsx3 - neon neon_vfpv4 asimd asimddp - vx vxe vxe2 - */ - """, - baseline="sse41 avx2 vsx2 asimd vsx3 vxe", - x86="avx512f avx2 sse42 sse2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd", - s390x="vxe2 vxe vx" - ) - # 'keep_sort', leave the sort as-is - 
self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort - avx512f sse42 avx2 sse2 - vsx2 vsx3 - asimd neon neon_vfpv4 asimddp - vxe vxe2 - */ - """, - x86="avx512f sse42 avx2 sse2", - ppc64="vsx2 vsx3", - armhf="asimd neon neon_vfpv4 asimddp", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimd asimddp", - s390x="vxe vxe2" - ) - # 'autovec', skipping features that can't be - # vectorized by the compiler - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort $autovec - avx512f avx2 sse42 sse41 sse2 - vsx3 vsx2 - asimddp asimd neon_vfpv4 neon - */ - """, - x86_gcc="avx512f avx2 sse42 sse41 sse2", - x86_icc="avx512f avx2 sse42 sse41 sse2", - x86_iccw="avx512f avx2 sse42 sse41 sse2", - x86_msvc="avx512f avx2 sse2" - if self.march() == 'x86' else "avx512f avx2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd" - ) - for policy in ("$maxopt", "$autovec"): - # 'maxopt' and autovec set the max acceptable optimization flags - self.expect_target_flags( - "/*@targets baseline %s */" % policy, - gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, - iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, - unknown={"baseline":".*"} - ) - - # 'werror', force compilers to treat warnings as errors - self.expect_target_flags( - "/*@targets baseline $werror */", - gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, - iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, - unknown={"baseline":".*"} - ) - - def test_targets_groups(self): - self.expect_targets( - """ - /*@targets $keep_baseline baseline #test_group */ - """, - groups=dict( - test_group=(""" - $keep_baseline - asimddp sse2 vsx2 avx2 vsx3 - avx512f asimdhp - """) - ), - x86="avx512f avx2 sse2 baseline", - ppc64="vsx3 vsx2 baseline", - armhf="asimddp asimdhp baseline" - ) - # test skip duplicating and sorting - self.expect_targets( - """ - /*@targets - * sse42 avx avx512f - 
* #test_group_1 - * vsx2 - * #test_group_2 - * asimddp asimdfhm - */ - """, - groups=dict( - test_group_1=(""" - VSX2 vsx3 asimd avx2 SSE41 - """), - test_group_2=(""" - vsx2 vsx3 asImd aVx2 sse41 - """) - ), - x86="avx512f avx2 avx sse42 sse41", - ppc64="vsx3 vsx2", - # vsx2 part of the default baseline of ppc64le, option ("min") - ppc64le="vsx3", - armhf="asimdfhm asimddp asimd", - # asimd part of the default baseline of aarch64, option ("min") - aarch64="asimdfhm asimddp" - ) - - def test_targets_multi(self): - self.expect_targets( - """ - /*@targets - (avx512_clx avx512_cnl) (asimdhp asimddp) - */ - """, - x86=r"\(avx512_clx avx512_cnl\)", - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and auto-sort - self.expect_targets( - """ - /*@targets - f16c (sse41 avx sse42) (sse3 avx2 avx512f) - vsx2 (vsx vsx3 vsx2) - (neon neon_vfpv4 asimd asimdhp asimddp) - */ - """, - x86="avx512f f16c avx", - ppc64="vsx3 vsx2", - ppc64le="vsx3", # vsx2 part of baseline - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and keep sort - self.expect_targets( - """ - /*@targets $keep_sort - (sse41 avx sse42) (sse3 avx2 avx512f) - (vsx vsx3 vsx2) - (asimddp neon neon_vfpv4 asimd asimdhp) - (vx vxe vxe2) - */ - """, - x86="avx avx512f", - ppc64="vsx3", - armhf=r"\(asimdhp asimddp\)", - s390x="vxe2" - ) - # test compiler variety and avoiding duplicating - self.expect_targets( - """ - /*@targets $keep_sort - fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 - */ - """, - x86_gcc=r"fma3 avx2 \(fma3 avx2\)", - x86_icc="avx2", x86_iccw="avx2", - x86_msvc="avx2" - ) - -def new_test(arch, cc): - if is_standalone: return textwrap.dedent("""\ - class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): - arch = '{arch}' - cc = '{cc}' - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.setup_class() - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) - return textwrap.dedent("""\ - 
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): - arch = '{arch}' - cc = '{cc}' - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) -""" -if 1 and is_standalone: - FakeCCompilerOpt.fake_info = "x86_icc" - cco = FakeCCompilerOpt(None, cpu_baseline="avx2") - print(' '.join(cco.cpu_baseline_names())) - print(cco.cpu_baseline_flags()) - unittest.main() - sys.exit() -""" -for arch, compilers in arch_compilers.items(): - for cc in compilers: - exec(new_test(arch, cc)) - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py deleted file mode 100644 index d9e8b2b0a834..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt_conf.py +++ /dev/null @@ -1,176 +0,0 @@ -import unittest -from os import sys, path - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang"), - narch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = ("arch", "compiler", "extra_args") - def __init__(self, *args, **kwargs): - CCompilerOpt.__init__(self, None, **kwargs) - def dist_compile(self, sources, flags, **kwargs): - return sources - def dist_info(self): - return FakeCCompilerOpt.fake_info - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _TestConfFeatures(FakeCCompilerOpt): - """A hook to check the sanity of configured features -- before it called by the abstract class '_Feature' - """ - - def conf_features_partial(self): - conf_all = self.conf_features - for feature_name, feature in conf_all.items(): - 
self.test_feature( - "attribute conf_features", - conf_all, feature_name, feature - ) - - conf_partial = FakeCCompilerOpt.conf_features_partial(self) - for feature_name, feature in conf_partial.items(): - self.test_feature( - "conf_features_partial()", - conf_partial, feature_name, feature - ) - return conf_partial - - def test_feature(self, log, search_in, feature_name, feature_dict): - error_msg = ( - "during validate '{}' within feature '{}', " - "march '{}' and compiler '{}'\n>> " - ).format(log, feature_name, self.cc_march, self.cc_name) - - if not feature_name.isupper(): - raise AssertionError(error_msg + "feature name must be in uppercase") - - for option, val in feature_dict.items(): - self.test_option_types(error_msg, option, val) - self.test_duplicates(error_msg, option, val) - - self.test_implies(error_msg, search_in, feature_name, feature_dict) - self.test_group(error_msg, search_in, feature_name, feature_dict) - self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) - - def test_option_types(self, error_msg, option, val): - for tp, available in ( - ((str, list), ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - )), - ((str,), ("disable",)), - ((int,), ("interest",)), - ((bool,), ("implies_detect",)), - ((bool, type(None)), ("autovec",)), - ) : - found_it = option in available - if not found_it: - continue - if not isinstance(val, tp): - error_tp = [t.__name__ for t in (*tp,)] - error_tp = ' or '.join(error_tp) - raise AssertionError(error_msg + - "expected '%s' type for option '%s' not '%s'" % ( - error_tp, option, type(val).__name__ - )) - break - - if not found_it: - raise AssertionError(error_msg + "invalid option name '%s'" % option) - - def test_duplicates(self, error_msg, option, val): - if option not in ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - ) : return - - if isinstance(val, str): - val = val.split() - - if len(val) != len(set(val)): - raise AssertionError(error_msg + 
"duplicated values in option '%s'" % option) - - def test_implies(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - implies = feature_dict.get("implies", "") - if not implies: - return - if isinstance(implies, str): - implies = implies.split() - - if feature_name in implies: - raise AssertionError(error_msg + "feature implies itself") - - for impl in implies: - impl_dict = search_in.get(impl) - if impl_dict is not None: - if "disable" in impl_dict: - raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) - continue - raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) - - def test_group(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - group = feature_dict.get("group", "") - if not group: - return - if isinstance(group, str): - group = group.split() - - for f in group: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'group', '%s' already exists as a feature name" % f - ) - - def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - extra_checks = feature_dict.get("extra_checks", "") - if not extra_checks: - return - if isinstance(extra_checks, str): - extra_checks = extra_checks.split() - - for f in extra_checks: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f - ) - -class TestConfFeatures(unittest.TestCase): - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self._setup() - - def _setup(self): - FakeCCompilerOpt.conf_nocache = True - - def test_features(self): - for arch, compilers in arch_compilers.items(): - for cc in compilers: - 
FakeCCompilerOpt.fake_info = (arch, cc, "") - _TestConfFeatures() - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index 749523528e63..000000000000 --- a/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import pytest -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, IS_WASM - - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. -from io import StringIO - -class redirect_stdout: - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr: - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix: - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. 
- # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. - - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -class TestExecCommand: - def setup_method(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = 
exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - with open(tmpfile, 'w') as f: - f.write('Hello') - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != 
'') - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index dd97f1e72afc..000000000000 --- a/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git 
a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 0817ae58c214..000000000000 --- a/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,55 +0,0 @@ -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions: - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions: - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, 
(vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 45c9cdac1910..000000000000 --- a/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions: - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions: - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 2e04f5266dc1..000000000000 --- a/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - 
'6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions: - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_from_template.py b/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 588175496299..000000000000 --- a/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. 
- """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/numpy/distutils/tests/test_log.py b/numpy/distutils/tests/test_log.py deleted file mode 100644 index 72fddf37370f..000000000000 --- a/numpy/distutils/tests/test_log.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import re -from contextlib import redirect_stdout - -import pytest - -from numpy.distutils import log - - -def setup_module(): - f = io.StringIO() # changing verbosity also logs here, capture that - with redirect_stdout(f): - log.set_verbosity(2, force=True) # i.e. DEBUG - - -def teardown_module(): - log.set_verbosity(0, force=True) # the default - - -r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) -def test_log_prefix(func_name): - func = getattr(log, func_name) - msg = f"{func_name} message" - f = io.StringIO() - with redirect_stdout(f): - func(msg) - out = f.getvalue() - assert out # sanity check - clean_out = r_ansi.sub("", out) - line = next(line for line in clean_out.splitlines()) - assert line == f"{func_name.upper()}: {msg}" diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index c4eac7b72de1..000000000000 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,47 +0,0 @@ -import shutil -import subprocess -import sys -import pytest -import os -import sysconfig - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -@pytest.mark.skipif(not os.path.exists(os.path.join(sys.prefix, 'libs')), - reason="test requires mingw library layout") -@pytest.mark.skipif(sysconfig.get_platform() == 
'win-arm64', reason='mingw GNU objdump does not understand arm64 binary format yet') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 40e7606eeb76..000000000000 --- a/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,88 +0,0 @@ -from os.path import join, sep, dirname - -import pytest - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal, IS_EDITABLE - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath: - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 
'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath: - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths: - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension: - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif 
sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -@pytest.mark.skipif( - IS_EDITABLE, - reason="`get_info` .ini lookup method incompatible with editable install" -) -def test_installed_npymath_ini(): - # Regression test for gh-7707. If npymath.ini wasn't installed, then this - # will give an error. - info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index b287ebe2e832..000000000000 --- a/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,84 +0,0 @@ -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo: - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 
'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags: - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index 696d38ddd66a..000000000000 --- a/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest -import subprocess -import json -import sys - -from numpy.distutils import _shell_utils -from numpy.testing import IS_WASM - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - - [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], 
-] - - -@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - -@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 5887abea76bd..000000000000 --- a/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,334 +0,0 @@ -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -import importlib.metadata -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import 
system_info, ConfigParser, mkl_info -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs -from numpy.distutils import _shell_utils - - -try: - if importlib.metadata.version('setuptools') >= '60': - # pkg-resources gives deprecation warnings, and there may be more - # issues. We only support setuptools <60 - pytest.skip("setuptools is too new", allow_module_level=True) -except importlib.metadata.PackageNotFoundError: - # we don't require `setuptools`; if it is not found, continue - pass - - -def get_class(name, notfound_action=1): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'temp1': Temp1Info, - 'temp2': Temp2Info, - 'duplicate_options': DuplicateOptionInfo, - }.get(name.lower(), _system_info) - return cl() - -simple_site = """ -[ALL] -library_dirs = {dir1:s}{pathsep:s}{dir2:s} -libraries = {lib1:s},{lib2:s} -extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os -runtime_library_dirs = {dir1:s} - -[temp1] -library_dirs = {dir1:s} -libraries = {lib1:s} -runtime_library_dirs = {dir1:s} - -[temp2] -library_dirs = {dir2:s} -libraries = {lib2:s} -extra_link_args = -Wl,-rpath={lib2_escaped:s} -rpath = {dir2:s} - -[duplicate_options] -mylib_libs = {lib1:s} -libraries = {lib2:s} -""" -site_cfg = simple_site - -fakelib_c_text = """ -/* This file is generated from numpy/distutils/testing/test_system_info.py */ -#include -void foo(void) { - printf("Hello foo"); -} -void bar(void) { - printf("Hello bar"); -} -""" - -def have_compiler(): - """ Return True if there appears to be an executable compiler - """ - compiler = customized_ccompiler() - try: - cmd = compiler.compiler # Unix compilers - except AttributeError: - try: - if not compiler.initialized: - compiler.initialize() # MSVC is different - except (DistutilsError, ValueError): - return False - cmd = [compiler.cc] - try: - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - 
p.stdout.close() - p.stderr.close() - p.wait() - except OSError: - return False - return True - - -HAVE_COMPILER = have_compiler() - - -class _system_info(system_info): - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': '', - 'include_dirs': '', - 'runtime_library_dirs': '', - 'rpath': '', - 'src_dirs': '', - 'search_static_first': "0", - 'extra_compile_args': '', - 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - # We have to parse the config files afterwards - # to have a consistent temporary filepath - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Override _check_libs to return with all dirs """ - info = {'libraries': libs, 'library_dirs': lib_dirs} - return info - - -class Temp1Info(_system_info): - """For testing purposes""" - section = 'temp1' - - -class Temp2Info(_system_info): - """For testing purposes""" - section = 'temp2' - -class DuplicateOptionInfo(_system_info): - """For testing purposes""" - section = 'duplicate_options' - - -class TestSystemInfoReading: - - def setup_method(self): - """ Create the libraries """ - # Create 2 sources and 2 libraries - self._dir1 = mkdtemp() - self._src1 = os.path.join(self._dir1, 'foo.c') - self._lib1 = os.path.join(self._dir1, 'libfoo.so') - self._dir2 = mkdtemp() - self._src2 = os.path.join(self._dir2, 'bar.c') - self._lib2 = os.path.join(self._dir2, 'libbar.so') - # Update local site.cfg - global simple_site, site_cfg - site_cfg = simple_site.format( - dir1=self._dir1, - lib1=self._lib1, - dir2=self._dir2, - lib2=self._lib2, - pathsep=os.pathsep, - lib2_escaped=_shell_utils.NativeParser.join([self._lib2]) - ) - # Write site.cfg - fd, self._sitecfg = mkstemp() - os.close(fd) - with open(self._sitecfg, 'w') as fd: - fd.write(site_cfg) - # Write the sources - with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with 
open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - def teardown_method(self): - # Do each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") 
- assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) - - HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) - - @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if " - "numpy is built with MKL support")) - def test_overrides(self): - previousDir = os.getcwd() - cfg = os.path.join(self._dir1, 'site.cfg') - shutil.copy(self._sitecfg, cfg) - try: - os.chdir(self._dir1) - # Check that the '[ALL]' section does not override - # missing values from other sections - info = mkl_info() - lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) - assert info.get_lib_dirs() != lib_dirs - - # But if we copy the values to a '[mkl]' section the value 
- # is correct - with open(cfg) as fid: - mkl = fid.read().replace('[ALL]', '[mkl]', 1) - with open(cfg, 'w') as fid: - fid.write(mkl) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - - # Also, the values will be taken from a section named '[DEFAULT]' - with open(cfg) as fid: - dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) - with open(cfg, 'w') as fid: - fid.write(dflt) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - finally: - os.chdir(previousDir) - - -def test_distutils_parse_env_order(monkeypatch): - from numpy.distutils.system_info import _parse_env_order - env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' - - base_order = list('abcdef') - - monkeypatch.setenv(env, 'b,i,e,f') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 3 - assert order == list('bef') - assert len(unknown) == 1 - - # For when LAPACK/BLAS optimization is disabled - monkeypatch.setenv(env, '') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 0 - assert len(unknown) == 0 - - for prefix in '^!': - monkeypatch.setenv(env, f'{prefix}b,i,e') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 4 - assert order == list('acdf') - assert len(unknown) == 1 - - with pytest.raises(ValueError): - monkeypatch.setenv(env, 'b,^e,i') - _parse_env_order(base_order, env) - - with pytest.raises(ValueError): - monkeypatch.setenv(env, '!b,^e,i') - _parse_env_order(base_order, env) diff --git a/numpy/distutils/tests/utilities.py b/numpy/distutils/tests/utilities.py deleted file mode 100644 index 5016a83d2164..000000000000 --- a/numpy/distutils/tests/utilities.py +++ /dev/null @@ -1,90 +0,0 @@ -# Kanged out of numpy.f2py.tests.util for test_build_ext -from numpy.testing import IS_WASM -import textwrap -import shutil -import tempfile -import os -import re -import subprocess -import sys - -# -# Check if compilers are available at all... 
-# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - if IS_WASM: - # Can't run compiler from inside WASM. - return _compiler_status - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... - code = textwrap.dedent( - f"""\ - import os - import sys - sys.path = {repr(sys.path)} - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {{}}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """ - ) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, "setup.py") - - with open(script, "w") as f: - f.write(code) - - cmd = [sys.executable, "setup.py", "config"] - p = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir - ) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(rb"COMPILERS:(\d+),(\d+),(\d+)", out) - if m: - _compiler_status = ( - bool(int(m.group(1))), - bool(int(m.group(2))), - bool(int(m.group(3))), - ) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return _get_compiler_status()[2] diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py deleted file mode 100644 index 4884960fdf22..000000000000 --- a/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. 
- -""" -import os -import sys -import subprocess -import shlex - -from distutils.errors import CompileError, DistutilsExecError, LibError -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.misc_util import _commandline_dep_string -from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - # XXX who uses this? - from sysconfig import get_config_vars - opt = shlex.join(shlex.split(os.environ['OPT'])) - gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) - ccomp_s = shlex.join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = shlex.split(ccomp_s) - llink_s = shlex.join(self.linker_so) - if opt not in llink_s: - self.linker_so = self.linker_so + shlex.split(opt) - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - # add commandline flags to dependency file - if deps: - # After running the compiler, the 
file created will be in EBCDIC - # but will not be tagged as such. This tags it so the file does not - # have multiple different encodings being written to it - if sys.platform == 'zos': - subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. 
- # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except OSError: - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError as e: - msg = str(e) - raise LibError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index 7324168e1dc8..f97e9ff3f80c 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,7 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, + >>> np.add(np.arange(2, dtype=np.float64), np.arange(2, dtype=np.float64), x, ... 
casting='unsafe') array([0, 2]) >>> x diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 3e34113edd4f..f1f1261d3d32 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,4 +1,5 @@ -# ruff: noqa: ANN401 +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import str as py_str, type as py_type from typing import ( Any, Generic, @@ -7,7 +8,6 @@ from typing import ( Never, NoReturn, Self, - TypeAlias, final, overload, type_check_only, @@ -16,7 +16,7 @@ from typing_extensions import TypeVar import numpy as np -__all__ = [ # noqa: RUF022 +__all__ = [ "BoolDType", "Int8DType", "ByteDType", @@ -52,17 +52,20 @@ __all__ = [ # noqa: RUF022 "StringDType", ] -# Helper base classes (typing-only) +# Type parameters + +_ItemSizeT_co = TypeVar("_ItemSizeT_co", bound=int, default=int, covariant=True) +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +# Helper base classes (typing-only) @type_check_only -class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] - names: None # pyright: ignore[reportIncompatibleVariableOverride] +class _SimpleDType[ScalarT: np.generic](np.dtype[ScalarT]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + names: None # pyright: ignore[reportIncompatibleVariableOverride] # pyrefly: ignore[bad-override] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property - def base(self) -> np.dtype[_ScalarT_co]: ... + def base(self) -> np.dtype[ScalarT]: ... @property def fields(self) -> None: ... @property @@ -77,7 +80,7 @@ class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore def subdtype(self) -> None: ... 
@type_check_only -class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] +class _LiteralDType[ScalarT_co: np.generic](_SimpleDType[ScalarT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... @property @@ -85,21 +88,17 @@ class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: i # Helper mixins (typing-only): -_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) -_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) -_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) - @type_check_only -class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): +class _TypeCodes[KindT: LiteralString, CharT: LiteralString, NumT: int]: @final @property - def kind(self) -> _KindT_co: ... + def kind(self) -> KindT: ... @final @property - def char(self) -> _CharT_co: ... + def char(self) -> CharT: ... @final @property - def num(self) -> _NumT_co: ... + def num(self) -> NumT: ... @type_check_only class _NoOrder: @@ -113,17 +112,14 @@ class _NativeOrder: @property def byteorder(self) -> L["="]: ... -_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) -_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int) - @type_check_only -class _NBit(Generic[_DataSize_co, _ItemSize_co]): +class _NBit[AlignmentT: int, ItemSizeT: int]: @final @property - def alignment(self) -> _DataSize_co: ... + def alignment(self) -> AlignmentT: ... @final @property - def itemsize(self) -> _ItemSize_co: ... + def itemsize(self) -> ItemSizeT: ... @type_check_only class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... @@ -238,7 +234,7 @@ class UInt64DType( # type: ignore[misc] def str(self) -> L["u8"]: ... 
# Standard C-named version/alias: -# NOTE: Don't make these `Final`: it will break stubtest +# NOTE: Don't make these `Final[_]` or a `type _` it will break stubtest ByteDType = Int8DType UByteDType = UInt8DType ShortDType = Int16DType @@ -426,11 +422,11 @@ class ObjectDType( # type: ignore[misc] class BytesDType( # type: ignore[misc] _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, - _NBit[L[1], _ItemSize_co], + _NBit[L[1], _ItemSizeT_co], _SimpleDType[np.bytes_], - Generic[_ItemSize_co], + Generic[_ItemSizeT_co], ): - def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... + def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> BytesDType[ItemSizeT]: ... @property def hasobject(self) -> L[False]: ... @property @@ -442,11 +438,11 @@ class BytesDType( # type: ignore[misc] class StrDType( # type: ignore[misc] _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, - _NBit[L[4], _ItemSize_co], + _NBit[L[4], _ItemSizeT_co], _SimpleDType[np.str_], - Generic[_ItemSize_co], + Generic[_ItemSizeT_co], ): - def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... + def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> StrDType[ItemSizeT]: ... @property def hasobject(self) -> L[False]: ... @property @@ -458,12 +454,12 @@ class StrDType( # type: ignore[misc] class VoidDType( # type: ignore[misc] _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, - _NBit[L[1], _ItemSize_co], - np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] - Generic[_ItemSize_co], + _NBit[L[1], _ItemSizeT_co], + np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] # pyrefly: ignore[invalid-inheritance] + Generic[_ItemSizeT_co], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment - def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... + def __new__(cls, length: _ItemSizeT_co, /) -> NoReturn: ... @property def base(self) -> Self: ... 
@property @@ -483,9 +479,9 @@ class VoidDType( # type: ignore[misc] # Other: -_DateUnit: TypeAlias = L["Y", "M", "W", "D"] -_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] -_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit +type _DateUnit = L["Y", "M", "W", "D"] +type _TimeUnit = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +type _DateTimeUnit = _DateUnit | _TimeUnit @final class DateTime64DType( # type: ignore[misc] @@ -577,8 +573,6 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... -_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) - @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], @@ -617,7 +611,7 @@ class StringDType( # type: ignore[misc] @property def subdtype(self) -> None: ... @property - def type(self) -> type[str]: ... + def type(self) -> py_type[py_str]: ... @property def str(self) -> L["|T8", "|T16"]: ... diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index e34dd99aec1c..f545c9c5fd84 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -26,12 +26,6 @@ def get_include(): """ Return the directory that contains the ``fortranobject.c`` and ``.h`` files. - .. note:: - - This function is not needed when building an extension with - `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files - in one go. - Python extension modules built with f2py-generated code need to use ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` header. 
This function can be used to obtain the directory containing diff --git a/numpy/f2py/_backends/__init__.py b/numpy/f2py/_backends/__init__.py index e91393c14be3..beb2bab2384d 100644 --- a/numpy/f2py/_backends/__init__.py +++ b/numpy/f2py/_backends/__init__.py @@ -2,8 +2,5 @@ def f2py_build_generator(name): if name == "meson": from ._meson import MesonBackend return MesonBackend - elif name == "distutils": - from ._distutils import DistutilsBackend - return DistutilsBackend else: raise ValueError(f"Unknown backend: {name}") diff --git a/numpy/f2py/_backends/__init__.pyi b/numpy/f2py/_backends/__init__.pyi index 43625c68061f..11e3743be541 100644 --- a/numpy/f2py/_backends/__init__.pyi +++ b/numpy/f2py/_backends/__init__.pyi @@ -2,4 +2,4 @@ from typing import Literal as L from ._backend import Backend -def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ... +def f2py_build_generator(name: L["meson"]) -> Backend: ... diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py deleted file mode 100644 index 5c8f1092b568..000000000000 --- a/numpy/f2py/_backends/_distutils.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import shutil -import sys -import warnings - -from numpy.distutils.core import Extension, setup -from numpy.distutils.misc_util import dict_append -from numpy.distutils.system_info import get_info -from numpy.exceptions import VisibleDeprecationWarning - -from ._backend import Backend - - -class DistutilsBackend(Backend): - def __init__(sef, *args, **kwargs): - warnings.warn( - "\ndistutils has been deprecated since NumPy 1.26.x\n" - "Use the Meson backend instead, or generate wrappers" - " without -c and use a custom build script", - VisibleDeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - def compile(self): - num_info = {} - if num_info: - self.include_dirs.extend(num_info.get("include_dirs", [])) - ext_args = { - "name": self.modulename, - "sources": self.sources, - "include_dirs": 
self.include_dirs, - "library_dirs": self.library_dirs, - "libraries": self.libraries, - "define_macros": self.define_macros, - "undef_macros": self.undef_macros, - "extra_objects": self.extra_objects, - "f2py_options": self.f2py_flags, - } - - if self.sysinfo_flags: - for n in self.sysinfo_flags: - i = get_info(n) - if not i: - print( - f"No {n!r} resources found" - "in system (try `f2py --help-link`)" - ) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - - sys.argv = [sys.argv[0]] + self.setup_flags - sys.argv.extend( - [ - "build", - "--build-temp", - self.build_dir, - "--build-base", - self.build_dir, - "--build-platlib", - ".", - "--disable-optimization", - ] - ) - - if self.fc_flags: - sys.argv.extend(["config_fc"] + self.fc_flags) - if self.flib_flags: - sys.argv.extend(["build_ext"] + self.flib_flags) - - setup(ext_modules=[ext]) - - if self.remove_build_dir and os.path.exists(self.build_dir): - print(f"Removing build directory {self.build_dir}") - shutil.rmtree(self.build_dir) diff --git a/numpy/f2py/_backends/_distutils.pyi b/numpy/f2py/_backends/_distutils.pyi deleted file mode 100644 index 56bbf7e5b49a..000000000000 --- a/numpy/f2py/_backends/_distutils.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from typing_extensions import deprecated, override - -from ._backend import Backend - -class DistutilsBackend(Backend): - @deprecated( - "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and " - "use a custom build script" - ) - # NOTE: the `sef` typo matches runtime - def __init__(sef, *args: object, **kwargs: object) -> None: ... - @override - def compile(self) -> None: ... 
diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index 5c85c61586fc..55ff9f7ae78d 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final, Literal as L -from typing_extensions import override +from typing import Final, Literal as L, override from ._backend import Backend diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 58c6758cc503..e01598f185e7 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -7,6 +7,11 @@ project('${modulename}', 'buildtype=${buildtype}' ]) fc = meson.get_compiler('fortran') +cc = meson.get_compiler('c') + +add_project_arguments( + cc.get_supported_arguments( '-fno-strict-aliasing'), language : 'c' +) py = import('python').find_installation('''${python}''', pure: false) py_dep = py.dependency() diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index a5af31d976ec..cc3889c192c9 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -627,16 +627,16 @@ def __call__(self, var): def l_and(*f): l1, l2 = 'lambda v', [] for i in range(len(f)): - l1 = '%s,f%d=f[%d]' % (l1, i, i) - l2.append('f%d(v)' % (i)) + l1 = f'{l1},f{i}=f[{i}]' + l2.append(f'f{i}(v)') return eval(f"{l1}:{' and '.join(l2)}") def l_or(*f): l1, l2 = 'lambda v', [] for i in range(len(f)): - l1 = '%s,f%d=f[%d]' % (l1, i, i) - l2.append('f%d(v)' % (i)) + l1 = f'{l1},f{i}=f[{i}]' + l2.append(f'f{i}(v)') return eval(f"{l1}:{' or '.join(l2)}") @@ -988,17 +988,18 @@ def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): if v1 in c2py_map: if k1 in f2cmap_all[k]: outmess( - "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" - % (k, k1, f2cmap_all[k][k1], v1) + "\tWarning: redefinition of " + f"{{'{k}':{{'{k1}':'{f2cmap_all[k][k1]}'->'{v1}'}}}}\n" ) f2cmap_all[k][k1] = v1 if verbose: 
outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') f2cmap_mapped.append(v1) elif verbose: + c2py_map_keys = list(c2py_map.keys()) errmess( - "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" - % (k, k1, v1, v1, list(c2py_map.keys())) + f"\tIgnoring map {{'{k}':{{'{k1}':'{v1}'}}}}: '{v1}' " + f"must be in {c2py_map_keys}\n" ) return f2cmap_all, f2cmap_mapped diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 32e381cf9a1c..fbf0ad764aae 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,7 +1,7 @@ from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload +from typing import Any, Final, Literal as L, Never, overload from .cfuncs import errmess @@ -106,15 +106,12 @@ __all__ = [ ### -_VT = TypeVar("_VT") -_RT = TypeVar("_RT") +type _Var = Mapping[str, list[str]] +type _ROut = Mapping[str, str] +type _F2CMap = Mapping[str, Mapping[str, str]] -_Var: TypeAlias = Mapping[str, list[str]] -_ROut: TypeAlias = Mapping[str, str] -_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] - -_Bool: TypeAlias = bool | L[0, 1] -_Intent: TypeAlias = L[ +type _Bool = bool | L[0, 1] +type _Intent = L[ "INTENT_IN", "INTENT_OUT", "INTENT_INOUT", @@ -140,9 +137,9 @@ class throw_error: def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError # -def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_and[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_or[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_not[VT, RT](f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... # def outmess(t: str) -> None: ... 
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 290ac2f467ad..552488a14313 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -70,7 +70,7 @@ 'unsigned_long_long': 'NPY_ULONGLONG', 'complex_float': 'NPY_CFLOAT', 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CLONGDOUBLE', 'string': 'NPY_STRING', 'character': 'NPY_STRING'} @@ -215,8 +215,9 @@ def getctype(var): try: ctype = f2cmap[var['kindselector']['*']] except KeyError: - errmess('getctype: "%s %s %s" not supported.\n' % - (var['typespec'], '*', var['kindselector']['*'])) + raw_typespec = var['typespec'] + star = var['kindselector']['*'] + errmess(f'getctype: "{raw_typespec} * {star}" not supported.\n') elif 'kind' in var['kindselector']: if typespec + 'kind' in f2cmap_all: f2cmap = f2cmap_all[typespec + 'kind'] @@ -228,9 +229,11 @@ def getctype(var): try: ctype = f2cmap[str(var['kindselector']['kind'])] except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' - % (typespec, var['kindselector']['kind'], ctype, - typespec, var['kindselector']['kind'], os.getcwd())) + kind = var['kindselector']['kind'] + errmess(f'getctype: "{typespec}({kind=})" is mapped to C ' + f'"{ctype}" (to override define {{{typespec!r}: ' + f'{{{kind!r}: ""}}}} ' + f'in {os.getcwd()}/.f2py_f2cmap file).\n') elif not isexternal(var): errmess(f'getctype: No C-type found in "{var}", assuming void.\n') return ctype @@ -273,8 +276,8 @@ def getstrlength(var): len = f2cexpr(a['len']) if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( - repr(var))) + errmess(f'getstrlength:intent(hide): expected a string with defined length ' + f'but got: {var!r}\n') len = '-1' return len @@ -305,7 +308,7 @@ def getarrdims(a, var, verbose=0): v = [dim[i]] 
else: for va in depargs: - if re.match(r'.*?\b%s\b.*' % va, dim[i]): + if re.match(rf'.*?\b{va}\b.*', dim[i]): v.append(va) for va in v: if depargs.index(va) > depargs.index(a): @@ -315,21 +318,18 @@ def getarrdims(a, var, verbose=0): for d in dim: i = i + 1 if d not in ['*', ':', '(*)', '(:)']: - ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['setdims'], i, d) + ret['setdims'] = f"{ret['setdims']}#varname#_Dims[{i}]={d}," if ret['setdims']: ret['setdims'] = ret['setdims'][:-1] ret['cbsetdims'], i = '', -1 for d in var['dimension']: i = i + 1 if d not in ['*', ':', '(*)', '(:)']: - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, d) + ret['cbsetdims'] = f"{ret['cbsetdims']}#varname#_Dims[{i}]={d}," elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' - % (d)) - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, 0) + outmess('getarrdims:warning: assumed shape array, using 0 ' + f'instead of {d!r}\n') + ret['cbsetdims'] = f"{ret['cbsetdims']}#varname#_Dims[{i}]={0}," elif verbose: errmess( f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') @@ -372,31 +372,30 @@ def getpydocsign(a, var): init = f', optional\\n Default: {showinit}' if isscalar(var): if isintent_inout(var): - sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], - c2pycode_map[ctype], init) + sig = (f"{a} : {opt} rank-0 array({c2py_map[ctype]}," + f"'{c2pycode_map[ctype]}'){init}") else: sig = f'{a} : {opt} {c2py_map[ctype]}{init}' sigout = f'{out_a} : {c2py_map[ctype]}' elif isstring(var): if isintent_inout(var): - sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( - a, opt, getstrlength(var), init) + sig = (f"{a} : {opt} rank-0 array(string(len={getstrlength(var)})," + f"'c'){init}") else: sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' sigout = f'{out_a} : string(len={getstrlength(var)})' elif isarray(var): 
dim = var['dimension'] rank = repr(len(dim)) - sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, - c2pycode_map[ - ctype], - ','.join(dim), init) + dim_str = ','.join(dim) + sig = (f"{a} : {opt} rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str}){init}") if a == out_a: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ - % (a, rank, c2pycode_map[ctype], ','.join(dim)) + sigout = (f"{a} : rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str})") else: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + sigout = (f"{out_a} : rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str}) and {a} storage") elif isexternal(var): ua = '' if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: @@ -422,10 +421,9 @@ def getarrdocsign(a, var): elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) - sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, - c2pycode_map[ - ctype], - ','.join(dim)) + dim_str = ','.join(dim) + sig = (f"{a} : rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str})") return sig @@ -457,7 +455,8 @@ def getinit(a, var): if not init: init, showinit = '""', "''" if init[0] == "'": - init = '"%s"' % (init[1:-1].replace('"', '\\"')) + escaped_init = init[1:-1].replace('"', '\\"') + init = f'"{escaped_init}"' if init[0] == '"': showinit = f"'{init[1:-1]}'" return init, showinit @@ -522,8 +521,9 @@ def sign2map(a, var): ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] else: ret['cbname'] = a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( - a, list(lcb_map.keys()))) + lcb_map_keys = list(lcb_map.keys()) + errmess(f'sign2map: Confused: external {a} is not in ' + f'lcb_map{lcb_map_keys}.\n') if isstring(var): ret['length'] = getstrlength(var) if isarray(var): @@ -557,24 +557,24 @@ def sign2map(a, var): ddim = 
','.join( map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) rl.append(f'dims({ddim})') + rl_str = ','.join(rl) if isexternal(var): - ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{rl_str}" else: - ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( - ret['ctype'], a, ret['showinit'], ','.join(rl)) + ret['vardebuginfo'] = (f"debug-capi:{ret['ctype']} " + f"{a}={ret['showinit']}:{rl_str}") if isscalar(var): if ret['ctype'] in cformat_map: ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" if isstring(var): - ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) + ret['vardebugshowvalue'] = f'debug-capi:slen({a})=%d {a}=\\"%s\\"' if isexternal(var): ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' if ret['ctype'] in cformat_map: ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isstring(var): - ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['varshowvalue'] = f'#name#:slen({a})=%d {a}=\\"%s\\"' ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) if hasnote(var): ret['note'] = var['note'] @@ -623,8 +623,9 @@ def routsign2map(rout): break lcb_map[ln] = un[1] elif rout.get('externals'): - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( - ret['name'], repr(rout['externals']))) + externals = rout['externals'] + errmess(f"routsign2map: Confused: function {ret['name']} has externals " + f'{externals!r} but no "use" statement.\n') ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' if isfunction(rout): if 'result' in rout: @@ -641,20 +642,19 @@ def routsign2map(rout): ret['rformat'] = c2buildvalue_map[ret['ctype']] else: ret['rformat'] = 'O' - errmess('routsign2map: no c2buildvalue key for type %s\n' % - (repr(ret['ctype']))) + errmess(f"routsign2map: no c2buildvalue key for type 
{ret['ctype']!r}\n") if debugcapi(rout): if ret['ctype'] in cformat_map: - ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) + ret['routdebugshowvalue'] = ("debug-capi:" + f"{a}={cformat_map[ret['ctype']]}") if isstringfunction(rout): - ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) + ret['routdebugshowvalue'] = f'debug-capi:slen({a})=%d {a}=\\"%s\\"' if isstringfunction(rout): ret['rlength'] = getstrlength(rout['vars'][a]) if ret['rlength'] == '-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( - repr(rout['name']))) + errmess("routsign2map: expected explicit specification of the length " + "of the string returned by the fortran function " + f"{rout['name']!r}; taking 10.\n") ret['rlength'] = '10' if hasnote(rout): ret['note'] = rout['note'] diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 238d473113e0..dcc75ec6f969 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -143,31 +143,15 @@ goto capi_fail; } #setdims# -#ifdef PYPY_VERSION -#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) - capi_arglist_list = PySequence_List((PyObject *)capi_arglist); - if (capi_arglist_list == NULL) goto capi_fail; -#else #define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) -#endif #pyobjfrom# #undef CAPI_ARGLIST_SETITEM -#ifdef PYPY_VERSION - CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); -#else - CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -#endif - CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); #ifdef F2PY_REPORT_ATEXIT f2py_cb_start_call_clock(); #endif -#ifdef PYPY_VERSION - capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); - 
Py_DECREF(capi_arglist_list); - capi_arglist_list = NULL; -#else - capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); -#endif +capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); #ifdef F2PY_REPORT_ATEXIT f2py_cb_stop_call_clock(); #endif diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index b2b1cad3d867..a70b2b59f5a8 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -89,10 +89,9 @@ def errmess(s: str) -> None: typedef long double long_double; #endif """ -typedefs[ - 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' -typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['complex_long_double'] = 'typedef union { struct {long double r,i;}; npy_clongdouble _npy; } complex_long_double;' +typedefs['complex_float'] = 'typedef union { struct {float r,i;}; npy_cfloat _npy; } complex_float;' +typedefs['complex_double'] = 'typedef union { struct {double r,i;}; npy_cdouble _npy; } complex_double;' typedefs['string'] = """typedef char * string;""" typedefs['character'] = """typedef char character;""" @@ -288,15 +287,15 @@ def errmess(s: str) -> None: #define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" cppmacros['pyobj_from_float1'] = """ #define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" -needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +needs['pyobj_from_complex_long_double1'] = ['complex_long_double', 'npy_math.h'] cppmacros['pyobj_from_complex_long_double1'] = """ -#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" -needs['pyobj_from_complex_double1'] = ['complex_double'] +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles((double)npy_creall(v._npy),(double)npy_cimagl(v._npy)))""" +needs['pyobj_from_complex_double1'] = ['complex_double', 'npy_math.h'] cppmacros['pyobj_from_complex_double1'] = """ -#define 
pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" -needs['pyobj_from_complex_float1'] = ['complex_float'] +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(npy_creal(v._npy),npy_cimag(v._npy)))""" +needs['pyobj_from_complex_float1'] = ['complex_float', 'npy_math.h'] cppmacros['pyobj_from_complex_float1'] = """ -#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles((double)npy_crealf(v._npy),(double)npy_cimagf(v._npy)))""" needs['pyobj_from_string1'] = ['string'] cppmacros['pyobj_from_string1'] = """ #define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" @@ -340,42 +339,56 @@ def errmess(s: str) -> None: return 1 """ -needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR', 'npy_math.h'] cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ #define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ +/* Helper to extract real/imag from complex value via cast to npy_cdouble */ +static inline double _f2py_creal_as_double(const void *p, int typenum) { + if (typenum == NPY_CFLOAT) return (double)npy_crealf(*(const npy_cfloat *)p); + if (typenum == NPY_CLONGDOUBLE) return (double)npy_creall(*(const npy_clongdouble *)p); + return npy_creal(*(const npy_cdouble *)p); +} +static inline double _f2py_cimag_as_double(const void *p, int typenum) { + if (typenum == NPY_CFLOAT) return (double)npy_cimagf(*(const npy_cfloat *)p); + if (typenum == NPY_CLONGDOUBLE) return (double)npy_cimagl(*(const npy_clongdouble *)p); + return npy_cimag(*(const npy_cdouble *)p); +} +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typenum)\\ PyArrayObject *arr = NULL;\\ + double _re, _im;\\ if (!obj) return -2;\\ if (!PyArray_Check(obj)) return -1;\\ if (!(arr=(PyArrayObject *)obj)) 
{fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (PyArray_DESCR(arr)->type==typecode) {\\ - *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ - *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + _re = _f2py_creal_as_double(v, typenum);\\ + _im = _f2py_cimag_as_double(v, typenum);\\ + if (PyArray_TYPE(arr)==typenum) {\\ + *(ctype *)(PyArray_DATA(arr))=(ctype)_re;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(ctype)_im;\\ return 1;\\ }\\ switch (PyArray_TYPE(arr)) {\\ - case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ - *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + case NPY_CDOUBLE: npy_csetreal((npy_cdouble *)PyArray_DATA(arr), _re);\\ + npy_csetimag((npy_cdouble *)PyArray_DATA(arr), _im);\\ break;\\ - case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ - *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + case NPY_CFLOAT: npy_csetrealf((npy_cfloat *)PyArray_DATA(arr), (float)_re);\\ + npy_csetimagf((npy_cfloat *)PyArray_DATA(arr), (float)_im);\\ break;\\ - case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ - case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case 
NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ - *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(_re!=0 && _im!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_CLONGDOUBLE: npy_csetreall((npy_clongdouble *)PyArray_DATA(arr), (npy_longdouble)_re);\\ + npy_csetimagl((npy_clongdouble *)PyArray_DATA(arr), (npy_longdouble)_im);\\ break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ default: return -2;\\ @@ -522,7 +535,7 @@ def errmess(s: str) -> None: #define CHECKSTRING(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + snprintf(errstring, sizeof(errstring), \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ PyErr_SetString(#modulename#_error, errstring);\\ /*goto capi_fail;*/\\ } else """ @@ -530,7 +543,7 @@ def errmess(s: str) -> None: #define 
CHECKSCALAR(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + snprintf(errstring, sizeof(errstring), \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ PyErr_SetString(#modulename#_error,errstring);\\ /*goto capi_fail;*/\\ } else """ @@ -839,7 +852,8 @@ def errmess(s: str) -> None: Py_INCREF(err); PyErr_Clear(); } - sprintf(mess + strlen(mess), + size_t len = strlen(mess); + snprintf(mess + len, F2PY_MESSAGE_BUFFER_SIZE - len, " -- expected str|bytes|sequence-of-str-or-bytes, got "); f2py_describe(obj, mess + strlen(mess)); PyErr_SetString(err, mess); @@ -1133,7 +1147,7 @@ def errmess(s: str) -> None: static int complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) { - complex_double cd = {0.0,0.0}; + complex_double cd = {.r=0, .i=0}; if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, CLongDouble) { PyArray_ScalarAsCtype(obj, v); @@ -1142,15 +1156,16 @@ def errmess(s: str) -> None: else if (PyArray_Check(obj)) { PyArrayObject *arr = (PyArrayObject *)obj; if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + npy_clongdouble tmp = *(npy_clongdouble *)PyArray_DATA(arr); + npy_csetreall(&v->_npy, npy_creall(tmp)); + npy_csetimagl(&v->_npy, npy_cimagl(tmp)); return 1; } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (long_double)cd.r; - (*v).i = (long_double)cd.i; + npy_csetreall(&v->_npy, (long_double)npy_creal(cd._npy)); + npy_csetimagl(&v->_npy, (long_double)npy_cimag(cd._npy)); return 1; } return 0; @@ -1165,22 +1180,22 @@ def errmess(s: str) -> None: Py_complex c; if (PyComplex_Check(obj)) { c = PyComplex_AsCComplex(obj); - (*v).r = c.real; - (*v).i = c.imag; + npy_csetreal(&v->_npy, c.real); + npy_csetimag(&v->_npy, c.imag); return 1; } if (PyArray_IsScalar(obj, ComplexFloating)) { 
if (PyArray_IsScalar(obj, CFloat)) { - npy_cfloat new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)npy_crealf(new); - (*v).i = (double)npy_cimagf(new); + npy_cfloat tmp; + PyArray_ScalarAsCtype(obj, &tmp); + npy_csetreal(&v->_npy, (double)npy_crealf(tmp)); + npy_csetimag(&v->_npy, (double)npy_cimagf(tmp)); } else if (PyArray_IsScalar(obj, CLongDouble)) { - npy_clongdouble new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)npy_creall(new); - (*v).i = (double)npy_cimagl(new); + npy_clongdouble tmp; + PyArray_ScalarAsCtype(obj, &tmp); + npy_csetreal(&v->_npy, (double)npy_creall(tmp)); + npy_csetimag(&v->_npy, (double)npy_cimagl(tmp)); } else { /* if (PyArray_IsScalar(obj, CDouble)) */ PyArray_ScalarAsCtype(obj, v); @@ -1198,20 +1213,21 @@ def errmess(s: str) -> None: if (arr == NULL) { return 0; } - (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); - (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + npy_cdouble tmp = *(npy_cdouble *)PyArray_DATA(arr); + npy_csetreal(&v->_npy, npy_creal(tmp)); + npy_csetimag(&v->_npy, npy_cimag(tmp)); Py_DECREF(arr); return 1; } /* Python does not provide PyNumber_Complex function :-( */ - (*v).i = 0.0; + npy_csetimag(&v->_npy, 0.0); if (PyFloat_Check(obj)) { - (*v).r = PyFloat_AsDouble(obj); - return !((*v).r == -1.0 && PyErr_Occurred()); + npy_csetreal(&v->_npy, PyFloat_AsDouble(obj)); + return !(npy_creal(v->_npy) == -1.0 && PyErr_Occurred()); } if (PyLong_Check(obj)) { - (*v).r = PyLong_AsDouble(obj); - return !((*v).r == -1.0 && PyErr_Occurred()); + npy_csetreal(&v->_npy, PyLong_AsDouble(obj)); + return !(npy_creal(v->_npy) == -1.0 && PyErr_Occurred()); } if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { PyObject *tmp = PySequence_GetItem(obj,0); @@ -1240,10 +1256,10 @@ def errmess(s: str) -> None: static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { - complex_double cd={0.0,0.0}; + complex_double cd = {.r=0, .i=0}; if 
(complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (float)cd.r; - (*v).i = (float)cd.i; + npy_csetrealf(&v->_npy, (float)npy_creal(cd._npy)); + npy_csetimagf(&v->_npy, (float)npy_cimag(cd._npy)); return 1; } return 0; @@ -1307,11 +1323,11 @@ def errmess(s: str) -> None: needs['try_pyarr_from_complex_float'] = [ 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] cfuncs[ - 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,NPY_CFLOAT);\n}\n' needs['try_pyarr_from_complex_double'] = [ 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] cfuncs[ - 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,NPY_CDOUBLE);\n}\n' needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] @@ -1346,7 +1362,7 @@ def errmess(s: str) -> None: Py_INCREF(tmp_fun); tot = maxnofargs; if (PyCFunction_Check(fun)) { - /* In case the function has a co_argcount (like on PyPy) */ + /* In case the function has a co_argcount */ di = 0; } if (xa != NULL) diff --git a/numpy/f2py/cfuncs.pyi b/numpy/f2py/cfuncs.pyi index 5887177752c3..2187368797a4 100644 --- a/numpy/f2py/cfuncs.pyi +++ b/numpy/f2py/cfuncs.pyi @@ -1,11 +1,11 @@ -from typing import Final, TypeAlias +from typing import Final from .__version__ import version ### -_NeedListDict: TypeAlias = dict[str, list[str]] -_NeedDict: TypeAlias = dict[str, str] +type _NeedListDict = dict[str, list[str]] +type _NeedDict = dict[str, str] ### diff --git a/numpy/f2py/common_rules.py 
b/numpy/f2py/common_rules.py index cef757b6c5a3..356f2a4f6355 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -64,12 +64,14 @@ def dadd(line, s=doc): hnames.append(n) else: inames.append(n) + hnames_str = ','.join(hnames) + inames_str = ','.join(inames) if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( - name, ','.join(inames), ','.join(hnames))) + outmess(f'\t\tConstructing COMMON block support for "{name}"...\n\t\t ' + f'{inames_str}\n\t\t Hidden: {hnames_str}\n') else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( - name, ','.join(inames))) + outmess(f'\t\tConstructing COMMON block support for "{name}"...\n\t\t ' + f'{inames_str}\n') fadd(f'subroutine f2pyinit{name}(setupfunc)') for usename in getuseblocks(m): fadd(f'use {usename}') @@ -82,7 +84,7 @@ def dadd(line, s=doc): fadd(f"common /{name}/ {','.join(vnames)}") fadd(f"call setupfunc({','.join(inames)})") fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + cadd(f'static FortranDataDef f2py_{name}_def[] = {{') idims = [] for n in inames: ct = capi_maps.getctype(vars[n]) @@ -96,12 +98,12 @@ def dadd(line, s=doc): dms = dm['dims'].strip() if not dms: dms = '-1' - cadd('\t{\"%s\",%s,{{%s}},%s, %s},' - % (n, dm['rank'], dms, at, elsize)) + rank = dm['rank'] + cadd(f'\t{{\"{n}\",{rank},{{{{{dms}}}}},{at}, {elsize}}},') cadd('\t{NULL}\n};') inames1 = rmbadname(inames) inames1_tps = ','.join(['char *' + s for s in inames1]) - cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd(f'static void f2py_setup_{name}({inames1_tps}) {{') cadd('\tint i_f2py=0;') for n in inames1: cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') @@ -110,23 +112,23 @@ def dadd(line, s=doc): F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' - % (F_FUNC, lower_name, name.upper(), - ','.join(['char*'] * len(inames1)))) - cadd('static void 
f2py_init_%s(void) {' % name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, lower_name, name.upper(), name)) + arg_types_str = ','.join(['char*'] * len(inames1)) + cadd(f"extern void {F_FUNC}(f2pyinit{lower_name},F2PYINIT{name.upper()})" + f"(void(*)({arg_types_str}));") + cadd(f'static void f2py_init_{name}(void) {{') + cadd(f'\t{F_FUNC}(f2pyinit{lower_name},F2PYINIT{name.upper()})' + f'(f2py_setup_{name});') cadd('}\n') iadd(f'\ttmp = PyFortranObject_New(f2py_{name}_def,f2py_init_{name});') iadd('\tif (tmp == NULL) return NULL;') iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return NULL;') iadd('\tPy_DECREF(tmp);') tname = name.replace('_', '\\_') - dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd(f'\\subsection{{Common block \\texttt{{{tname}}}}}\n') dadd('\\begin{description}') for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, vars[n]))) + docsign = capi_maps.getarrdocsign(n, vars[n]) + dadd(f'\\item[]{{{{}}\\verb@{docsign}@{{}}}}') if hasnote(vars[n]): note = vars[n]['note'] if isinstance(note, list): diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 22d804389ad4..1f89aba8a841 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -243,7 +243,6 @@ def outmess(line, flag=1): sys.stdout.write(line) -re._MAXCACHE = 50 defaultimplicitrules = {} for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c] = {'typespec': 'real'} @@ -413,9 +412,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern = beginpattern77 else: beginpattern = beginpattern90 - outmess('\tReading file %s (format:%s%s)\n' - % (repr(currentfilename), sourcecodeform, - (strictf77 and ',strict') or '')) + outmess(f"\tReading file {currentfilename!r} " + f"(format:{sourcecodeform}{',strict' if strictf77 else ''})\n") l = l.expandtabs().replace('\xa0', ' ') # Get rid of newline characters @@ -461,7 +459,7 @@ def readfortrancode(ffile, dowithline=show, 
istop=1): if l[0] not in spacedigits: raise Exception('readfortrancode: Found non-(space,digit) char ' 'in the first column.\n\tAre you sure that ' - 'this code is in fix form?\n\tline=%s' % repr(l)) + f'this code is in fix form?\n\tline={l!r}') if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): # Continuation of a previous line @@ -520,8 +518,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: raise ValueError( f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) + filepositiontext = (f'Line #{fin.filelineno() - 1} ' + f'in {currentfilename}:"{l1}"\n\t') m = includeline.match(origfinalline) if m: fn = m.group('name') @@ -538,8 +536,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) + outmess(f'readfortrancode: could not find include file {fn!r} ' + f'in {os.pathsep.join(include_dirs)}. Ignoring.\n') else: dowithline(finalline) l1 = ll @@ -549,8 +547,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: finalline = ll origfinalline = ll - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) + filepositiontext = f'Line #{fin.filelineno() - 1} in {currentfilename}:"{l1}"\n\t' m = includeline.match(origfinalline) if m: fn = m.group('name') @@ -566,8 +563,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) + outmess(f'readfortrancode: could not find include file {fn!r} ' + f'in {os.pathsep.join(include_dirs)}. 
Ignoring.\n') else: dowithline(finalline) filepositiontext = '' @@ -735,8 +732,7 @@ def crackline(line, reset=0): if f77modulename and neededmodule == groupcounter: fl = 2 while groupcounter > fl: - outmess('crackline: groupcounter=%s groupname=%s\n' % - (repr(groupcounter), repr(groupname))) + outmess(f'crackline: groupcounter={groupcounter!r} groupname={groupname!r}\n') outmess( 'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n') grouplist[groupcounter - 1].append(groupcache[groupcounter]) @@ -786,7 +782,8 @@ def crackline(line, reset=0): if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: continue m1 = re.match( - r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) + rf'(?P[^"]*)\b{name}\b\s*@\(@(?P[^@]*)@\)@.*\Z', + markouterparen(line), re.I) if m1: m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) @@ -803,7 +800,7 @@ def crackline(line, reset=0): return if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): previous_context = None - outmess('crackline:%d: No pattern for line\n' % (groupcounter)) + outmess(f'crackline:{groupcounter}: No pattern for line\n') return elif pat[1] == 'end': if 0 <= skipblocksuntil < groupcounter: @@ -811,16 +808,13 @@ def crackline(line, reset=0): if skipblocksuntil <= groupcounter: return if groupcounter <= 0: - raise Exception('crackline: groupcounter(=%s) is nonpositive. ' - 'Check the blocks.' - % (groupcounter)) + raise Exception(f'crackline: groupcounter(={groupcounter}) is nonpositive. 
' + 'Check the blocks.') m1 = beginpattern[0].match(line) if (m1) and (not m1.group('this') == groupname[groupcounter]): - raise Exception('crackline: End group %s does not match with ' - 'previous Begin group %s\n\t%s' % - (repr(m1.group('this')), repr(groupname[groupcounter]), - filepositiontext) - ) + raise Exception(f'crackline: End group {m1.group("this")!r} ' + 'does not match with previous Begin group ' + f'{groupname[groupcounter]!r}\n\t{filepositiontext}') if skipblocksuntil == groupcounter: skipblocksuntil = -1 grouplist[groupcounter - 1].append(groupcache[groupcounter]) @@ -1086,8 +1080,7 @@ def analyzeline(m, case, line): grouplist[groupcounter] = [] if needmodule: if verbose > 1: - outmess('analyzeline: Creating module block %s\n' % - repr(f77modulename), 0) + outmess(f'analyzeline: Creating module block {f77modulename!r}\n', 0) groupname[groupcounter] = 'module' groupcache[groupcounter]['block'] = 'python module' groupcache[groupcounter]['name'] = f77modulename @@ -1101,13 +1094,13 @@ def analyzeline(m, case, line): grouplist[groupcounter] = [] if needinterface: if verbose > 1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( - groupcounter), 0) + outmess('analyzeline: Creating additional interface block ' + f'({groupcounter=}).\n', 0) groupname[groupcounter] = 'interface' groupcache[groupcounter]['block'] = 'interface' groupcache[groupcounter]['name'] = 'unknown_interface' - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + prev_group = groupcache[groupcounter - 1] + groupcache[groupcounter]['from'] = f"{prev_group['from']}:{prev_group['name']}" groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] @@ -1125,11 +1118,10 @@ def analyzeline(m, case, line): if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename elif f77modulename and groupcounter == 3: - 
groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) + groupcache[groupcounter]['from'] = f"{groupcache[groupcounter - 1]['from']}:{currentfilename}" else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + prev_group = groupcache[groupcounter - 1] + groupcache[groupcounter]['from'] = f"{prev_group['from']}:{prev_group['name']}" for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] @@ -1221,8 +1213,8 @@ def analyzeline(m, case, line): ll = ll[:i + 1] + '::' + ll[i + 1:] i = ll.find('::') if ll[i:] == '::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n' % - (m.group('this'), ll[:i])) + outmess('All arguments will have attribute ' + f'{m.group("this")}{ll[:i]}\n') ll = ll + ','.join(groupcache[groupcounter]['args']) if i < 0: i = 0 @@ -1233,8 +1225,8 @@ def analyzeline(m, case, line): ch = markoutercomma(pl).split('@,@') if len(ch) > 1: pl = ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( - ','.join(ch[1:]))) + outmess("analyzeline: cannot handle multiple attributes without " + f"type specification. Ignoring {','.join(ch[1:])!r}.\n") last_name = None for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: @@ -1244,8 +1236,8 @@ def analyzeline(m, case, line): k = '' else: print(m.groupdict()) - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( - case, repr(e))) + outmess(f'analyzeline: no name pattern found in {case} statement ' + f'for {e!r}. 
Skipping.\n') continue else: k = rmbadname1(m1.group('name')) @@ -1265,15 +1257,16 @@ def analyzeline(m, case, line): 'analyzeline: missing __user__ module (could be nothing)\n') # fixes ticket 1693 if k != groupcache[groupcounter]['name']: - outmess('analyzeline: appending intent(callback) %s' - ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + outmess(f"analyzeline: appending intent(callback) {k}" + f" to {groupcache[groupcounter]['name']} " + "arguments\n") groupcache[groupcounter]['args'].append(k) else: errmess( f'analyzeline: intent(callback) {k} is ignored\n') else: - errmess('analyzeline: intent(callback) %s is already' - ' in argument list\n' % (k)) + errmess(f'analyzeline: intent(callback) {k} is already' + ' in argument list\n') if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: ap = case if 'attrspec' in edecl[k]: @@ -1312,8 +1305,8 @@ def analyzeline(m, case, line): if k not in edecl: edecl[k] = {} if '=' in edecl[k] and (not edecl[k]['='] == initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( - k, edecl[k]['='], initexpr)) + outmess(f'analyzeline: Overwriting the value of parameter "{k}" ' + f'("{edecl[k]["="]}") with "{initexpr}".\n') t = determineexprtype(initexpr, params) if t: if t.get('typespec') == 'real': @@ -1328,8 +1321,8 @@ def analyzeline(m, case, line): try: v = eval(initexpr, {}, params) except (SyntaxError, NameError, TypeError) as msg: - errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' - % (initexpr, msg)) + errmess(f'analyzeline: Failed to evaluate {initexpr!r}. 
' + f'Ignoring: {msg}\n') continue edecl[k]['='] = repr(v) if 'attrspec' in edecl[k]: @@ -1485,7 +1478,7 @@ def analyzeline(m, case, line): line = '//' + line cl = [] - [_, bn, ol] = re.split('/', line, maxsplit=2) # noqa: RUF039 + [_, bn, ol] = re.split('/', line, maxsplit=2) bn = bn.strip() if not bn: bn = '_BLNK_' @@ -1705,35 +1698,39 @@ def updatevars(typespec, selector, attrspec, entitydecl): if not_has_typespec: edecl['typespec'] = typespec elif typespec and (not typespec == edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['typespec'], typespec)) + current_typespec = edecl['typespec'] + outmess(f'updatevars: attempt to change the type of "{ename}" ' + f'("{current_typespec}") to "{typespec}". Ignoring.\n') if 'kindselector' not in edecl: edecl['kindselector'] = copy.copy(kindselect) elif kindselect: for k in list(kindselect.keys()): if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['kindselector'][k], kindselect[k])) + current_kind = edecl['kindselector'][k] + outmess('updatevars: attempt to change the kindselector ' + f'"{k}" of "{ename}" ("{current_kind}") to ' + f'"{kindselect[k]}". Ignoring.\n') else: edecl['kindselector'][k] = copy.copy(kindselect[k]) if 'charselector' not in edecl and charselect: if not_has_typespec: edecl['charselector'] = charselect else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' - % (ename, charselect)) + errmess(f'updatevars:{ename}: attempt to change empty charselector ' + f'to {charselect!r}. Ignoring.\n') elif charselect: for k in list(charselect.keys()): if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". 
Ignoring.\n' % ( - k, ename, edecl['charselector'][k], charselect[k])) + outmess(f'updatevars: attempt to change the charselector "{k}" ' + f'of "{ename}" ("{edecl["charselector"][k]}") to ' + f'"{charselect[k]}". Ignoring.\n') else: edecl['charselector'][k] = copy.copy(charselect[k]) if 'typename' not in edecl: edecl['typename'] = typename elif typename and (not edecl['typename'] == typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['typename'], typename)) + outmess(f'updatevars: attempt to change the typename of "{ename}" ' + f'("{edecl["typename"]}") to "{typename}". Ignoring.\n') if 'attrspec' not in edecl: edecl['attrspec'] = copy.copy(attrspec) elif attrspec: @@ -1778,8 +1775,9 @@ def updatevars(typespec, selector, attrspec, entitydecl): else: d1['array'] = d1['array'] + ',' + d1['len'] del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( - typespec, e, typespec, ename, d1['array'])) + array_spec = d1['array'] + errmess(f'updatevars: "{typespec} {e}" is mapped to ' + f'"{typespec} {ename}({array_spec})"\n') if 'len' in d1: if typespec in ['complex', 'integer', 'logical', 'real']: @@ -1797,8 +1795,11 @@ def updatevars(typespec, selector, attrspec, entitydecl): if 'init' in d1: if '=' in edecl and (not edecl['='] == d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['='], d1['init'])) + current_init = edecl['='] + new_init = d1['init'] + outmess('updatevars: attempt to change the init expression of ' + f'"{ename}" ("{current_init}") to "{new_init}". ' + f'Ignoring.\n') else: edecl['='] = d1['init'] @@ -1811,8 +1812,8 @@ def updatevars(typespec, selector, attrspec, entitydecl): for dm1 in edecl['attrspec']: if dm1[:9] == 'dimension' and dm1 != dm: del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. 
Ignoring.\n' - % (ename, dm1, dm)) + errmess(f'updatevars:{ename}: attempt to change ' + f'{dm1!r} to {dm!r}. Ignoring.\n') break else: @@ -1879,8 +1880,7 @@ def cracktypespec(typespec, selector): if typename: typename = typename.group('name') else: - outmess('cracktypespec: no typename found in %s\n' % - (repr(typespec + selector))) + outmess(f'cracktypespec: no typename found in {typespec + selector}\n') else: outmess(f'cracktypespec: no selector used for {repr(selector)}\n') return kindselect, charselect, typename @@ -1980,8 +1980,8 @@ def get_useparameters(block, param_map=None): for usename, mapping in list(usedict.items()): usename = usename.lower() if usename not in f90modulevars: - outmess('get_useparameters: no module %s info used by %s\n' % - (usename, block.get('name'))) + outmess(f'get_useparameters: no module {usename} info used by ' + f'{block.get("name")}\n') continue mvars = f90modulevars[usename] params = get_parameters(mvars) @@ -1992,8 +1992,8 @@ def get_useparameters(block, param_map=None): errmess(f'get_useparameters: mapping for {mapping} not impl.\n') for k, v in list(params.items()): if k in param_map: - outmess('get_useparameters: overriding parameter %s with' - ' value from module %s\n' % (repr(k), repr(usename))) + outmess(f'get_useparameters: overriding parameter {k!r} with' + f' value from module {usename!r}\n') param_map[k] = v return param_map @@ -2561,9 +2561,8 @@ def _eval_scalar(value, params): except (NameError, SyntaxError, TypeError): return value except Exception as msg: - errmess('"%s" in evaluating %r ' - '(available names: %s)\n' - % (msg, value, list(params.keys()))) + errmess(f'"{msg}" in evaluating {value!r} ' + f'(available names: {list(params)})\n') return value @@ -2611,7 +2610,7 @@ def analyzevars(block): try: dep_matches[n] except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + dep_matches[n] = re.compile(rf'.*\b{v}\b', re.I).match for n in svars: if n[0] in list(attrrules.keys()): vars[n] = 
setattrspec(vars[n], attrrules[n[0]]) @@ -2628,8 +2627,8 @@ def analyzevars(block): for l in implicitrules[ln0][k]: vars[n] = setattrspec(vars[n], l) elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( - repr(n), block['name'])) + outmess(f"analyzevars: typespec of variable {n!r} is not defined " + f"in routine {block['name']}.\n") if 'charselector' in vars[n]: if 'len' in vars[n]['charselector']: l = vars[n]['charselector']['len'] @@ -3263,15 +3262,14 @@ def crack2fortrangen(block, tab='\n', as_interface=False): f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): - f2pyenhancements = '%s%s%s %s' % ( - f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + f2pyenhancements = (f"{f2pyenhancements}{tab + tabchar}{k} " + f"{block['f2pyenhancements'][k]}") intent_lst = block.get('intent', [])[:] if blocktype == 'function' and 'callback' in intent_lst: intent_lst.remove('callback') if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s' %\ - (f2pyenhancements, tab + tabchar, - ','.join(intent_lst), name) + f2pyenhancements = (f"{f2pyenhancements}{tab + tabchar}" + f"intent({','.join(intent_lst)}) {name}") use = '' if 'use' in block: use = use2fortran(block['use'], tab + tabchar) @@ -3298,8 +3296,9 @@ def crack2fortrangen(block, tab='\n', as_interface=False): body = body + entry_stmts if blocktype == 'block data' and name == '_BLOCK_DATA_': name = '' - ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( - tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + ret = (f'{tab}{prefix}{blocktype} {name}{args}{result} ' + f'{mess}{f2pyenhancements}{use}{vars}{common}{body}{tab}end ' + f'{blocktype} {name}') return ret @@ -3506,11 +3505,11 @@ def crack2fortran(block): header = """! -*- f90 -*- ! Note: the context of this file is case sensitive. """ - footer = """ -! 
This file was auto-generated with f2py (version:%s). + footer = f""" +! This file was auto-generated with f2py (version:{f2py_version}). ! See: ! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e -""" % (f2py_version) +""" return header + pyf + footer diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 742d358916a2..09213e156636 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,28 +1,16 @@ import re from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping -from typing import ( - IO, - Any, - Concatenate, - Final, - Literal as L, - Never, - ParamSpec, - TypeAlias, - overload, -) +from typing import IO, Any, Concatenate, Final, Literal as L, Never, overload from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict ### -_Tss = ParamSpec("_Tss") - -_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None -_VisitItem: TypeAlias = tuple[str | None, _VisitResult] -_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None] +type _VisitResult = list[Any] | dict[str, Any] | None +type _VisitItem = tuple[str | None, _VisitResult] +type _VisitFunc[**Tss] = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, Tss], _VisitItem | None] ### @@ -243,13 +231,13 @@ def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[ def crack2fortran(block: Mapping[str, Any]) -> str: ... # -def traverse( +def traverse[**Tss]( obj: tuple[str | None, _VisitResult], - visit: _VisitFunc[_Tss], + visit: _VisitFunc[Tss], parents: list[tuple[str | None, _VisitResult]] = [], result: list[Any] | dict[str, Any] | None = None, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> _VisitItem | _VisitResult: ... 
# diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 7eb1697cc787..f6ddbf09ad9a 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -23,10 +23,10 @@ def run(): try: import numpy - has_newnumpy = 1 + has_numpy = 1 except ImportError as e: - print('Failed to import new numpy:', e) - has_newnumpy = 0 + print('Failed to import numpy:', e) + has_numpy = 0 try: from numpy.f2py import f2py2e @@ -35,113 +35,21 @@ def run(): print('Failed to import f2py2e:', e) has_f2py2e = 0 - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 - - if has_newnumpy: + if has_numpy: try: - print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + print(f'Found numpy version {numpy.__version__!r} in {numpy.__file__}') except Exception as msg: print('error:', msg) print('------') if has_f2py2e: try: - print('Found f2py2e version %r in %s' % - (f2py2e.__version__.version, f2py2e.__file__)) + print(f'Found f2py2e version {f2py2e.__version__.version!r} in ' + f'{f2py2e.__file__}') except Exception as msg: print('error:', msg) print('------') - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in 
build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print( - 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo - print('ok') - print('------') - else: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') os.chdir(_path) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 84a5aa3c20a6..98c2b7c65805 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -36,7 +36,6 @@ # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess -MESON_ONLY_VER = (sys.version_info >= (3, 12)) __usage__ =\ f"""Usage: @@ 
-117,10 +116,6 @@ --include-paths ::... Search include files from the given directories. - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - --f2cmap Load Fortran-to-Python KIND specification from the given file. Default: .f2py_f2cmap in current directory. @@ -401,8 +396,8 @@ def buildmodules(lst): ret = {} for module, name in zip(modules, mnames): if name in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n' % ( - name, ','.join('"%s"' % s for s in isusedby[name]))) + using_modules = ','.join(f'"{s}"' for s in isusedby[name]) + outmess(f'\tSkipping module "{name}" which is used by {using_modules}.\n') else: um = [] if 'use' in module: @@ -503,8 +498,8 @@ def run_main(comline_list): if 'python module' not in options: errmess( 'Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError('All blocks must be python module blocks but got %s' % ( - repr(plist['block']))) + raise TypeError('All blocks must be python module blocks but got ' + f'{plist["block"]!r}') auxfuncs.debugoptions = options['debug'] f90mod_rules.options = options auxfuncs.wrapfuncs = options['wrapfuncs'] @@ -583,7 +578,7 @@ def preparse_sysargv(): sys.argv = [sys.argv[0]] + remaining_argv backend_key = args.backend - if MESON_ONLY_VER and backend_key == 'distutils': + if backend_key == 'distutils': outmess("Cannot use distutils backend with Python>=3.12," " using meson backend instead.\n") backend_key = "meson" @@ -657,35 +652,16 @@ def run_compile(): reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] - if not (MESON_ONLY_VER or backend_key == 'meson'): - fc_flags.extend(distutils_flags) sys.argv = [_m for _m in sys.argv 
if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: v = '--fcompiler=' if s[:len(v)] == v: - if MESON_ONLY_VER or backend_key == 'meson': - outmess( - "--fcompiler cannot be used with meson," - "set compiler with the FC environment variable\n" - ) - else: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print(f'Unknown vendor: "{s[len(v):]}"') - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv # noqa: B909 - continue + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) for s in del_list: i = flib_flags.index(s) del flib_flags[i] @@ -773,15 +749,6 @@ def validate_modulename(pyf_files, modulename='untitled'): return modulename def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - if MESON_ONLY_VER: - outmess("Use --dep for meson builds\n") - else: - from numpy.distutils.system_info import show_all - show_all() - return - if '-c' in sys.argv[1:]: run_compile() else: diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 46794e552b41..4dd6a9f73ec3 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -2,17 +2,13 @@ import argparse import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType -from typing import Any, Final, NotRequired, TypedDict, type_check_only -from typing_extensions import TypeVar, override +from typing import Any, Final, NotRequired, TypedDict, override, type_check_only from .__version__ import version from .auxfuncs import _Bool, outmess as outmess ### -_KT = TypeVar("_KT", bound=Hashable) -_VT = TypeVar("_VT") - @type_check_only class _F2PyDict(TypedDict): csrc: list[str] @@ -28,7 +24,6 @@ class 
_PreparseResult(TypedDict): ### -MESON_ONLY_VER: Final[bool] f2py_version: Final = version numpy_version: Final = version __usage__: Final[str] @@ -55,7 +50,7 @@ def main() -> None: ... def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... -def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def dict_append[KT: Hashable, VT](d_out: MutableMapping[KT, VT], d_in: Mapping[KT, VT]) -> None: ... def filter_files( prefix: str, suffix: str, diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index d13a42a9d71f..0ecce40aa856 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -39,11 +39,11 @@ def findf90modules(m): return ret -fgetdims1 = """\ +fgetdims1 = f"""\ external f2pysetdata logical ns integer r,i - integer(%d) s(*) + integer({np.intp().itemsize}) s(*) ns = .FALSE. 
if (allocated(d)) then do i=1,r @@ -55,7 +55,7 @@ def findf90modules(m): deallocate(d) end if end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" fgetdims2 = """\ end if @@ -136,8 +136,8 @@ def iadd(line, s=ihooks): s[0] = f'{s[0]}\n{line}' vrd = capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + cadd(f"static FortranDataDef f2py_{m['name']}_def[] = {{") + dadd(f"\\subsection{{Fortran 90/95 module \\texttt{{{m['name']}}}}}\n") if hasnote(m): note = m['note'] if isinstance(note, list): @@ -156,11 +156,12 @@ def iadd(line, s=ihooks): if not dms: dms = '-1' use_fgetdims2 = fgetdims2 - cadd('\t{"%s",%s,{{%s}},%s, %s},' % - (undo_rmbadname1(n), dm['rank'], dms, at, - capi_maps.get_elsize(var))) - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, var))) + rank = dm['rank'] + elsize = capi_maps.get_elsize(var) + cadd(f'\t{{"{undo_rmbadname1(n)}",{rank},{{{{{dms}}}}},{at}, ' + f'{elsize}}},') + docsign = capi_maps.getarrdocsign(n, var) + dadd(f'\\item[]{{{{}}\\verb@{docsign}@{{}}}}') if hasnote(var): note = var['note'] if isinstance(note, list): @@ -178,8 +179,8 @@ def iadd(line, s=ihooks): fadd('integer flag\n') fhooks[0] = fhooks[0] + fgetdims1 dms = range(1, int(dm['rank']) + 1) - fadd(' allocate(d(%s))\n' % - (','.join(['s(%s)' % i for i in dms]))) + alloc_args = ','.join(f's({i})' for i in dms) + fadd(f' allocate(d({alloc_args}))\n') fhooks[0] = fhooks[0] + use_fgetdims2 fadd(f'end subroutine {fargs[-1]}') else: @@ -190,60 +191,70 @@ def iadd(line, s=ihooks): if onlyvars: dadd('\\end{description}') if hasbody(m): + m_name = m['name'] for b in m['body']: + b_name = b['name'] if not isroutine(b): outmess("f90mod_rules.buildhooks:" - f" skipping {b['block']} {b['name']}\n") + f" skipping {b['block']} {b_name}\n") continue - modobjs.append(f"{b['name']}()") - 
b['modulename'] = m['name'] + modobjs.append(f"{b_name}()") + b['modulename'] = m_name api, wrap = rules.buildapi(b) if isfunction(b): fhooks[0] = fhooks[0] + wrap - fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + fargs.append(f"f2pywrap_{m_name}_{b_name}") ifargs.append(func2subr.createfuncwrapper(b, signature=1)) elif wrap: fhooks[0] = fhooks[0] + wrap - fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + fargs.append(f"f2pywrap_{m_name}_{b_name}") ifargs.append( func2subr.createsubrwrapper(b, signature=1)) else: - fargs.append(b['name']) + fargs.append(b_name) mfargs.append(fargs[-1]) api['externroutines'] = [] ar = applyrules(api, vrd) ar['docs'] = [] ar['docshort'] = [] ret = dictappend(ret, ar) - cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' - 'f2py_rout_#modulename#_%s_%s,' - 'doc_f2py_rout_#modulename#_%s_%s},') - % (b['name'], m['name'], b['name'], m['name'], b['name'])) - sargs.append(f"char *{b['name']}") + cadd(f'\t{{"{b_name}",-1,{{{{-1}}}},0,0,NULL,(void *)' + f'f2py_rout_#modulename#_{m_name}_{b_name},' + f'doc_f2py_rout_#modulename#_{m_name}_{b_name}}},') + sargs.append(f"char *{b_name}") sargsp.append('char *') - iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") + iadd(f"\tf2py_{m_name}_def[i_f2py++].data = {b_name};") cadd('\t{NULL}\n};\n') iadd('}') - ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( - m['name'], ','.join(sargs), ihooks[0]) - if '_' in m['name']: + m_name = m['name'] + sargs_str = ','.join(sargs) + ihooks[0] = (f'static void f2py_setup_{m_name}({sargs_str}) ' + f'{{\n\tint i_f2py=0;{ihooks[0]}') + if '_' in m_name: F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' - iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' - % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) - iadd('static void f2py_init_%s(void) {' % (m['name'])) - iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + sargsp_str = ','.join(sargsp) + iadd(f'extern void 
{F_FUNC}(f2pyinit{m_name},' + f'F2PYINIT{m_name.upper()})(void (*)({sargsp_str}));') + iadd(f'static void f2py_init_{m_name}(void) {{') + iadd(f'\t{F_FUNC}(f2pyinit{m_name},' + f'F2PYINIT{m_name.upper()})(f2py_setup_{m_name});') iadd('}\n') ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks - ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( - m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + ret['initf90modhooks'] = [ + '\t{', + ('\t\tPyObject *tmp = ' + f'PyFortranObject_New(f2py_{m_name}_def,f2py_init_{m_name});'), + f'\t\tPyDict_SetItemString(d, "{m_name}", tmp);', + '\t\tPy_XDECREF(tmp);', + '\t}', + ] + ret["initf90modhooks"] fadd('') - fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") + fadd(f"subroutine f2pyinit{m_name}(f2pysetupfunc)") if mfargs: for a in undo_rmbadname(mfargs): - fadd(f"use {m['name']}, only : {a}") + fadd(f"use {m_name}, only : {a}") if ifargs: fadd(' '.join(['interface'] + ifargs)) fadd('end interface') @@ -252,13 +263,14 @@ def iadd(line, s=ihooks): for a in undo_rmbadname(efargs): fadd(f'external {a}') fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") - fadd(f"end subroutine f2pyinit{m['name']}\n") + fadd(f"end subroutine f2pyinit{m_name}\n") dadd('\n'.join(ret['latexdoc']).replace( r'\subsection{', r'\subsubsection{')) ret['latexdoc'] = [] - ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") + modobjs_str = ','.join(undo_rmbadname(modobjs)) + ret['docs'].append(f"\"\t{m_name} --- {modobjs_str}\"") ret['routine_defs'] = '' ret['doc'] = [] diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 09b67f7c3085..3484a2e78457 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -297,8 +297,8 @@ def assubr(rout): if isfunction_wrap(rout): fortranname = getfortranname(rout) name = rout['name'] - outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( - name, fortranname)) 
+ outmess('\t\tCreating wrapper for Fortran function ' + f'"{name}"("{fortranname}")...\n') rout = copy.copy(rout) fname = name rname = fname @@ -322,8 +322,8 @@ def assubr(rout): if issubroutine_wrap(rout): fortranname = getfortranname(rout) name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' - % (name, fortranname)) + outmess('\t\tCreating wrapper for Fortran subroutine ' + f'"{name}"("{fortranname}")...\n') rout = copy.copy(rout) return rout, createsubrwrapper(rout) return rout, '' diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 68c49e60028e..62d8c8af61f9 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -96,6 +96,7 @@ isintent_copy, isintent_hide, isintent_inout, + isintent_inplace, isintent_nothide, isintent_out, isintent_overwrite, @@ -1015,8 +1016,8 @@ 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, {l_and(isoptional, l_not(hasinitvalue)) : ' if (#varname#_capi != Py_None)'}, - ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n if (f2py_success) {'], + (' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {')], 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', 'need': ['#ctype#_from_pyobj'], '_check': l_and(iscomplex, isintent_nothide), @@ -1057,13 +1058,13 @@ {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], '_check': isstring }, { # Common - 'frompyobj': [ + 'frompyobj': [( """\ slen(#varname#) = #elsize#; f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" """#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" """`#varname#\' of #pyname# to C #ctype#\"); - if (f2py_success) {""", + if (f2py_success) {"""), # The trailing null value for Fortran is blank. 
{l_not(isintent_c): " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, @@ -1200,9 +1201,20 @@ } if (f2py_success) {"""]}, ], + 'pyobjfrom': [ + {l_and(isintent_inplace, l_not(isintent_out)): """\ + f2py_success = (PyArray_ResolveWritebackIfCopy(capi_#varname#_as_array) >= 0); + if (f2py_success) { /* inplace array #varname# has been written back to */"""}, + {l_and(isintent_inplace, isintent_out): """\ + f2py_success = (PyArray_ResolveWritebackIfCopy(capi_#varname#_as_array) >= 0); + if (f2py_success) { /* return written-back-to inplace array #varname# */ + Py_INCREF(#varname#_capi); + Py_SETREF(capi_#varname#_as_array, (PyArrayObject*)#varname#_capi);"""}, + ], + 'closepyobjfrom': {isintent_inplace: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, 'cleanupfrompyobj': [ # note that this list will be reversed - ' } ' - '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + (' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */'), {l_not(l_or(isintent_out, isintent_hide)): """\ if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { Py_XDECREF(capi_#varname#_as_array); }"""}, @@ -1431,8 +1443,7 @@ def buildmodule(m, um): with open(fn, 'w') as f: f.write('.. 
-*- rest -*-\n') f.write('\n'.join(ar['restdoc'])) - outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % - (options['buildpath'], vrd['modulename'])) + outmess(f' ReST Documentation is saved to file "{fn}"\n') if options['dolatexdoc']: fn = os.path.join( options['buildpath'], vrd['modulename'] + 'module.tex') @@ -1446,8 +1457,7 @@ def buildmodule(m, um): f.write('\n'.join(ar['latexdoc'])) if 'shortlatex' not in options: f.write('\\end{document}') - outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % - (options['buildpath'], vrd['modulename'])) + outmess(f' Documentation is saved to file "{fn}"\n') if funcwrappers: wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) ret['fsrc'] = wn @@ -1515,8 +1525,10 @@ def buildapi(rout): var = rout['vars'] if ismoduleroutine(rout): - outmess(' Constructing wrapper function "%s.%s"...\n' % - (rout['modulename'], rout['name'])) + module_name = rout['modulename'] + name = rout['name'] + outmess(' Constructing wrapper function ' + f'"{module_name}.{name}"...\n') else: outmess(f" Constructing wrapper function \"{rout['name']}\"...\n") # Routine diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index 30439f6b8351..c45d42289363 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,17 +1,12 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, Literal as L, TypeAlias -from typing_extensions import TypeVar +from typing import Any, Final, Literal as L from .__version__ import version from .auxfuncs import _Bool, _Var -### - -_VT = TypeVar("_VT", default=str) - -_Predicate: TypeAlias = Callable[[_Var], _Bool] -_RuleDict: TypeAlias = dict[str, _VT] -_DefDict: TypeAlias = dict[_Predicate, _VT] +type _Predicate = Callable[[_Var], _Bool] +type _RuleDict[VT] = dict[str, VT] +type _DefDict[VT] = dict[_Predicate, VT] ### @@ -24,9 +19,9 @@ sepdict: Final[dict[str, str]] = ... generationtime: Final[int] = ... 
typedef_need_dict: Final[_DefDict[str]] = ... -module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... -routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... -defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +module_rules: Final[_RuleDict[str | list[str] | _RuleDict[str]]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict[str] | _RuleDict[str]]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict[str]]]] = ... rout_rules: Final[list[_RuleDict[str | Any]]] = ... aux_rules: Final[list[_RuleDict[str | Any]]] = ... arg_rules: Final[list[_RuleDict[str | Any]]] = ... @@ -34,8 +29,8 @@ check_rules: Final[list[_RuleDict[str | Any]]] = ... stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... -def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... -def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict[str]: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict[str], str]: ... 
# namespace pollution k: str diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index d6664d6bdfb7..d8151db0c4e4 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -192,13 +192,24 @@ PyFortranObject_NewAsAttr(FortranDataDef *defs) } fp->len = 1; fp->defs = defs; + PyObject *name; if (defs->rank == -1) { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + name = PyUnicode_FromFormat("function %s", defs->name); } else if (defs->rank == 0) { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + name = PyUnicode_FromFormat("scalar %s", defs->name); } else { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + name = PyUnicode_FromFormat("array %s", defs->name); } + if (name == NULL) { + Py_DECREF(fp); + return NULL; + } + if (PyDict_SetItemString(fp->dict, "__name__", name) < 0) { + Py_DECREF(name); + Py_DECREF(fp); + return NULL; + } + Py_DECREF(name); return (PyObject *)fp; } @@ -565,6 +576,47 @@ fortran_repr(PyFortranObject *fp) return repr; } +static PyObject * +fortran_dir(PyFortranObject *fp, PyObject *Py_UNUSED(args)) +{ + int i; + PyObject *dir_list = PyDict_Keys(fp->dict); + if (dir_list == NULL) { + return NULL; + } + for (i = 0; i < fp->len; i++) { + PyObject *name = PyUnicode_FromString(fp->defs[i].name); + if (name == NULL) { + Py_DECREF(dir_list); + return NULL; + } + int contains = PySequence_Contains(dir_list, name); + if (contains == -1) { + Py_DECREF(name); + Py_DECREF(dir_list); + return NULL; + } + if (!contains) { + if (PyList_Append(dir_list, name) < 0) { + Py_DECREF(name); + Py_DECREF(dir_list); + return NULL; + } + } + Py_DECREF(name); + } + if (PyList_Sort(dir_list) < 0) { + Py_DECREF(dir_list); + return NULL; + } + return dir_list; +} + +static PyMethodDef fortran_methods[] = { + {"__dir__", (PyCFunction)fortran_dir, METH_NOARGS, NULL}, + {NULL, NULL, 0, NULL} 
+}; + PyTypeObject PyFortran_Type = { PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran", .tp_basicsize = sizeof(PyFortranObject), @@ -572,6 +624,7 @@ PyTypeObject PyFortran_Type = { .tp_getattr = (getattrfunc)fortran_getattr, .tp_setattr = (setattrfunc)fortran_setattr, .tp_repr = (reprfunc)fortran_repr, + .tp_methods = fortran_methods, .tp_call = (ternaryfunc)fortran_call, }; @@ -774,30 +827,6 @@ dump_attrs(const PyArrayObject *obj) } #endif -#define SWAPTYPE(a, b, t) \ - { \ - t c; \ - c = (a); \ - (a) = (b); \ - (b) = c; \ - } - -static int -swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) -{ - PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, - *arr2 = (PyArrayObject_fields *)obj2; - SWAPTYPE(arr1->data, arr2->data, char *); - SWAPTYPE(arr1->nd, arr2->nd, int); - SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); - SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); - SWAPTYPE(arr1->base, arr2->base, PyObject *); - SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); - SWAPTYPE(arr1->flags, arr2->flags, int); - /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ - return 0; -} - #define ARRAY_ISCOMPATIBLE(arr,type_num) \ ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ @@ -1039,32 +1068,38 @@ ndarray_from_pyobj(const int type_num, return NULL; } - /* here we have always intent(in) or intent(inplace) */ + /* + * Here, we have always intent(in) or intent(inplace) + * and require a copy for input. We allow arbitrary casting for + * input, but for inplace we check that the types are equivalent. + */ { - PyArrayObject * retarr = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), - NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + int flags = NPY_ARRAY_FORCECAST | NPY_ARRAY_ENSURECOPY + | ((intent & F2PY_INTENT_C) ? 
NPY_ARRAY_IN_ARRAY + : NPY_ARRAY_IN_FARRAY); + if (intent & F2PY_INTENT_INPLACE) { + if (!(ARRAY_ISCOMPATIBLE(arr, type_num)) || + (PyArray_ISSIGNED(arr) && PyTypeNum_ISUNSIGNED(type_num)) || + (PyArray_ISUNSIGNED(arr) && PyTypeNum_ISSIGNED(type_num)) + ) { + sprintf(mess, "failed to initialize intent(inplace) array" + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + flags |= NPY_ARRAY_WRITEBACKIFCOPY; + } + /* Steals reference to descr */ + PyArrayObject *retarr = (PyArrayObject *)PyArray_FromArray( + arr, descr, flags); if (retarr==NULL) { - Py_DECREF(descr); return NULL; } + arr = retarr; F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - if (PyArray_CopyInto(retarr, arr)) { - Py_DECREF(retarr); - return NULL; - } - if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) { - Py_DECREF(retarr); - return NULL; /* XXX: set exception */ - } - Py_XDECREF(retarr); - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - } else { - arr = retarr; - } } return arr; } diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h index 4aed2f60891b..21ff12d9b622 100644 --- a/numpy/f2py/src/fortranobject.h +++ b/numpy/f2py/src/fortranobject.h @@ -130,9 +130,9 @@ F2PyGetThreadLocalCallbackPtr(char *key); : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 
16 : 1))) #define F2PY_CHECK_ALIGNMENT(arr, intent) \ ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) -#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ - || PyArray_DESCR(arr)->type_num == NPY_UINT8) -#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyDataType_TYPENUM(PyArray_DESCR(arr)) == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ + || PyDataType_TYPENUM(PyArray_DESCR(arr)) == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyDataType_TYPENUM(PyArray_DESCR(arr)) == NPY_UNICODE) extern PyArrayObject * ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 06be2bb16044..94ad1461760b 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,28 +1,25 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload +from typing import Any, Generic, Literal as L, Self, overload from typing_extensions import TypeVar __all__ = ["Expr"] ### -_Tss = ParamSpec("_Tss") -_ExprT = TypeVar("_ExprT", bound=Expr) -_ExprT1 = TypeVar("_ExprT1", bound=Expr) -_ExprT2 = TypeVar("_ExprT2", bound=Expr) +# Explicit covariance is required here due to the inexpressible read-only attributes. 
_OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) _LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) _DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) _LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) _RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) -_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] -_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] +type _RelCOrPy = L["==", "!=", "<", "<=", ">", ">="] +type _RelFortran = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] -_ToExpr: TypeAlias = Expr | complex | str -_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] -_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] +type _ToExpr = Expr | complex | str +type _ToExprN = _ToExpr | tuple[_ToExprN, ...] +type _NestedString = str | tuple[_NestedString, ...] | list[_NestedString] ### @@ -97,8 +94,8 @@ class Precedence(Enum): NONE = 100 class Expr(Generic[_OpT_co, _DataT_co]): - op: _OpT_co - data: _DataT_co + op: _OpT_co # read-only + data: _DataT_co # read-only @staticmethod def parse(s: str, language: Language = ...) -> Expr: ... @@ -149,7 +146,7 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + def __getitem__[ExprT: Expr](self, index: ExprT | tuple[ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, ExprT]]: ... @overload def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... @@ -158,9 +155,9 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + def traverse[**Tss](self, /, visit: Callable[Tss, None], *args: Tss.args, **kwargs: Tss.kwargs) -> Expr: ... 
@overload - def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + def traverse[**Tss, ExprT: Expr](self, /, visit: Callable[Tss, ExprT], *args: Tss.args, **kwargs: Tss.kwargs) -> ExprT: ... # def contains(self, /, other: Expr) -> bool: ... @@ -176,23 +173,23 @@ class Expr(Generic[_OpT_co, _DataT_co]): def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... class _Pair(Generic[_LeftT_co, _RightT_co]): - left: _LeftT_co - right: _RightT_co + left: _LeftT_co # read-only + right: _RightT_co # read-only def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... # @overload - def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... @overload - def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... @overload - def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[object, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... @overload def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... class _FromStringWorker(Generic[_LanguageT_co]): - language: _LanguageT_co + language: _LanguageT_co # read-only original: str | None quotes_map: dict[str, str] @@ -216,4 +213,4 @@ class _FromStringWorker(Generic[_LanguageT_co]): @overload def process(self, /, s: tuple[str, ...], context: str = "expr") -> tuple[Expr | _Pair, ...]: ... @overload - def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... # noqa: ANN401 + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... 
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index 25866f1a40ec..99bfca3322c9 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -120,10 +120,37 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_ITEMSIZE(arr)); } +static char doc_f2py_rout_wrap_resolve_write_back_if_copy[] = "\
+Function signature:\n\
+  resolve_write_back_if_copy(arr)\n\
+  Calls PyArray_ResolveWriteBackIfCopy\n\
+Required arguments:\n" +" arr : input array object\n" +"Return objects:\n" +" return_code : int\n" +; +static PyObject *f2py_rout_wrap_resolve_write_back_if_copy(PyObject *capi_self, + PyObject *capi_args) { + PyObject *arr_capi = Py_None; + PyArrayObject *arr = NULL; + if (!PyArg_ParseTuple(capi_args,"O!|:wrap.resolve_write_back_if_copy", + &PyArray_Type,&arr_capi)) { + return NULL; + } + arr = (PyArrayObject *)arr_capi; + int res = PyArray_ResolveWritebackIfCopy(arr); + if (res < 0) { + return NULL; + } + return Py_BuildValue("i",res); +} + static PyMethodDef f2py_module_methods[] = { {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {"resolve_write_back_if_copy",f2py_rout_wrap_resolve_write_back_if_copy, + METH_VARARGS,doc_f2py_rout_wrap_resolve_write_back_if_copy}, {NULL,NULL} }; diff --git a/numpy/f2py/tests/src/inplace/foo.f b/numpy/f2py/tests/src/inplace/foo.f new file mode 100644 index 000000000000..ac85112beda8 --- /dev/null +++ b/numpy/f2py/tests/src/inplace/foo.f @@ -0,0 +1,31 @@ +c Test inplace calculations in array c, by squaring all its values. +c As a sanity check on the input, stores the original content in copy. 
+      subroutine inplace(c, m1, m2, copy) + integer*4 m1, m2, i, j + real*4 c(m1, m2), copy(m1, m2) +cf2py intent(inplace) c +cf2py intent(out) copy +cf2py integer, depend(c), intent(hide) :: m1 = len(c) +cf2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + do i=1,m1 + do j=1,m2 + copy(i, j) = c(i, j) + c(i, j) = c(i, j) ** 2 + end do + end do + end + + subroutine inplace_out(c, m1, m2, copy) + integer*4 m1, m2, i, j + real*4 c(m1, m2), copy(m1, m2) +cf2py intent(inplace, out) c +cf2py intent(out) copy +cf2py integer, depend(c), intent(hide) :: m1 = len(c) +cf2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + do i=1,m1 + do j=1,m2 + copy(i, j) = c(i, j) + c(i, j) = c(i, j) ** 2 + end do + end do + end diff --git a/numpy/f2py/tests/src/regression/complex_struct_compat.f90 b/numpy/f2py/tests/src/regression/complex_struct_compat.f90 new file mode 100644 index 000000000000..3e673ee3a4d7 --- /dev/null +++ b/numpy/f2py/tests/src/regression/complex_struct_compat.f90 @@ -0,0 +1,8 @@ + subroutine zero_imag(c, n) + complex*16, intent(inout) :: c(n) + integer, intent(in) :: n + integer :: k + do k = 1, n + c(k) = cmplx(dble(c(k)), 0.0d0, kind=8) + end do + end subroutine diff --git a/numpy/f2py/tests/src/regression/complex_struct_compat.pyf b/numpy/f2py/tests/src/regression/complex_struct_compat.pyf new file mode 100644 index 000000000000..fd0f4d73b23c --- /dev/null +++ b/numpy/f2py/tests/src/regression/complex_struct_compat.pyf @@ -0,0 +1,12 @@ +python module _complex_struct_compat_test + interface + subroutine zero_imag(c, n) + callstatement { int k; for(k=0;k<n;k++) c[k].i = 0.0; } + callprotoargument complex_double*, int* + + complex*16 intent(inout), dimension(n) :: c + integer intent(hide), depend(c) :: n = shape(c,0) + + end subroutine zero_imag + end interface +end python module _complex_struct_compat_test diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 15383e9431cc..ee57d5d65b2f --- 
a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -321,7 +321,17 @@ def __init__(self, typ, dims, intent, obj): assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( self.arr_attr[5], self.pyarr_attr[5] )) # descr - assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + arr_flags = self.arr_attr[6] + if intent.is_intent("inplace") and not ( + obj.dtype == typ and obj.flags["F_CONTIGUOUS"] + ): + assert flags2names(8192) == ["WRITEBACKIFCOPY"] + assert (arr_flags & 8192), f"{flags2names(8192)} not set." + arr_flags -= 8192 # Not easy to set on pyarr. + else: + assert not (arr_flags & 8192) + + assert arr_flags == self.pyarr_attr[6], repr(( self.arr_attr[6], self.pyarr_attr[6], flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), @@ -651,14 +661,34 @@ def test_inplace(self): assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) + # Spot check that they contain the same information initially. assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - a.arr[1][2] = 54 - assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + # If we change a.arr, that will not immediately be reflected in obj. + change_item = 54 if self.type.dtype != bool else False + a.arr[1][2] = change_item + assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) + assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) + # This is because our implementation uses writebackifcopy. + assert a.arr.flags["WRITEBACKIFCOPY"] + assert a.arr.base is obj + # It has a different organization from obj. + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + # If we resolve the write-back, obj will be properly filled. + code = wrap.resolve_write_back_if_copy(a.arr) + assert code == 1, "no write-back resolution was done!" + assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) + # Check that the original's attributes are not messed up. 
+ assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + + def test_inplace_f_order(self): + # If the input array is suitable, it will just be used. + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + assert obj.flags["FORTRAN"] and not obj.flags["CONTIGUOUS"] + a = self.array(obj.shape, intent.inplace, obj) assert a.arr is obj - assert obj.flags["FORTRAN"] # obj attributes are changed inplace! - assert not obj.flags["CONTIGUOUS"] def test_inplace_from_casttype(self): + # Similar to above, but including casting. for t in self.type.cast_types(): if t is self.type: continue @@ -667,12 +697,33 @@ def test_inplace_from_casttype(self): assert obj.dtype.type is not self.type.type assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape - a = self.array(shape, intent.inplace, obj) + same_kind = obj.dtype.kind == self.type.dtype.kind + # We avoid pytest.raises here since if the error is not raised, + # we need to do the callback to avoid a runtime warning. + try: + a = self.array(shape, intent.inplace, obj) + except ValueError as exc: + assert not same_kind, "Array not created while having same kind" + assert "not compatible" in str(exc) + return + + if not same_kind: + # Shouldn't happen! Resolve write-back to get right error. + wrap.resolve_write_back_if_copy(a.arr) + assert same_kind, "Array created despite not having same kind" + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - a.arr[1][2] = 54 - assert obj[1][2] == a.arr[1][2] == np.array(54, - dtype=self.type.dtype) - assert a.arr is obj - assert obj.flags["FORTRAN"] # obj attributes changed inplace! - assert not obj.flags["CONTIGUOUS"] - assert obj.dtype.type is self.type.type # obj changed inplace! + change_item = 54 if self.type.dtype != bool else False + a.arr[1][2] = change_item + assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) + # Not yet propagated. 
+ assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) + assert a.arr.flags["WRITEBACKIFCOPY"] + assert a.arr.base is obj + # Propagate back to obj. + code = wrap.resolve_write_back_if_copy(a.arr) + assert code == 1, "no write-back resolution was done!" + assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) + # Should not affect attributes. + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + assert obj.dtype.type is not self.type.type diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py index ba255a1b473c..0a35f2e34a7e 100644 --- a/numpy/f2py/tests/test_block_docstring.py +++ b/numpy/f2py/tests/test_block_docstring.py @@ -2,8 +2,6 @@ import pytest -from numpy.testing import IS_PYPY - from . import util @@ -13,8 +11,6 @@ class TestBlockDocString(util.F2PyTest): @pytest.mark.skipif(sys.platform == "win32", reason="Fails with MinGW64 Gfortran (Issue #9673)") - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_block_docstring(self): expected = "bar : 'i'-array(2,3)\n" assert self.module.block.__doc__ == expected diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 6614efb16db8..1560c73d01fc 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -9,7 +9,6 @@ import pytest import numpy as np -from numpy.testing import IS_PYPY from . 
import util @@ -22,8 +21,6 @@ class TestF77Callback(util.F2PyTest): def test_all(self, name): self.check_function(name) - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = t(fun,[fun_extra_args]) diff --git a/numpy/f2py/tests/test_capi_maps.py b/numpy/f2py/tests/test_capi_maps.py new file mode 100644 index 000000000000..739b453c0f5e --- /dev/null +++ b/numpy/f2py/tests/test_capi_maps.py @@ -0,0 +1,10 @@ +from numpy.f2py import capi_maps + + +def test_complex_long_double_capi_map(): + assert capi_maps.c2capi_map["complex_long_double"] == "NPY_CLONGDOUBLE" + + +def test_complex_long_double_is_distinct(): + assert capi_maps.c2pycode_map["complex_long_double"] != capi_maps.c2pycode_map["complex_double"] + assert capi_maps.c2capi_map["complex_long_double"] != capi_maps.c2capi_map["complex_double"] diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index c3967cfb967b..aeef49d1f4b0 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -60,7 +60,7 @@ def test_access_type(self, tmp_path): assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} assert set(tt['c']['attrspec']) == {'public'} - def test_nowrap_private_proceedures(self, tmp_path): + def test_nowrap_private_procedures(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 959e1527c482..90063d474a33 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -1,3 +1,4 @@ +import os import platform import re import shlex @@ -232,10 +233,8 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "untitledmodule.c" in out - -@pytest.mark.skipif((platform.system() != 'Linux') or 
(sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') -def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): - """Check that no distutils imports are performed on 3.12 +def test_no_distutils_backend(capfd, hello_world_f90, monkeypatch): + """Check that distutils backend and related options fail CLI :: --fcompiler --help-link --backend distutils """ MNAME = "hi" @@ -248,22 +247,23 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): compiler_check_f2pycli() out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( sys, "argv", ["f2py", "--help-link"] ) - with util.switchdir(ipath.parent): + with pytest.raises(SystemExit): f2pycli() out, _ = capfd.readouterr() - assert "Use --dep for meson builds" in out - MNAME = "hi2" # Needs to be different for a new -c + assert "Unknown option --help-link" in out + monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() + sys, "argv", ["f2py", "--backend", "distutils"] ) - with util.switchdir(ipath.parent): + with pytest.raises(SystemExit): + compiler_check_f2pycli() f2pycli() out, _ = capfd.readouterr() - assert "Cannot use distutils backend with Python>=3.12" in out - + assert "'distutils' backend was removed" in out @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): @@ -832,7 +832,8 @@ def test_freethreading_compatible(hello_world_f90, monkeypatch): rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' assert rout.stdout == eout - assert rout.stderr == "" + if "LSAN_OPTIONS" not in os.environ: + assert rout.stderr == "" assert rout.returncode == 0 diff --git a/numpy/f2py/tests/test_inplace.py b/numpy/f2py/tests/test_inplace.py new file mode 100644 index 000000000000..35af2a42db6a --- /dev/null +++ b/numpy/f2py/tests/test_inplace.py @@ -0,0 +1,49 @@ + +import pytest + +import numpy as np +from numpy.f2py.tests import util 
+from numpy.testing import assert_array_equal + + +@pytest.mark.slow +class TestInplace(util.F2PyTest): + sources = [util.getpath("tests", "src", "inplace", "foo.f")] + + @pytest.mark.parametrize("func", ["inplace", "inplace_out"]) + @pytest.mark.parametrize("writeable", ["writeable", "readonly"]) + @pytest.mark.parametrize("view", [ + None, (), (slice(None, 2, None), slice(None, None, 2))]) + @pytest.mark.parametrize("dtype", ["f4", "f8"]) + def test_inplace(self, dtype, view, writeable, func): + # Test inplace modifications of an input array. + a = np.arange(12.0, dtype=dtype).reshape((3, 4)).copy() + a.flags.writeable = writeable == "writeable" + k = a if view is None else a[view] + + ffunc = getattr(self.module, func) + if not a.flags.writeable: + with pytest.raises(ValueError, match="WRITEBACKIFCOPY base is read-only"): + ffunc(k) + return + + ref_k = k + exp_copy = k.copy() + exp_k = k ** 2 + exp_a = a.copy() + exp_a[view or ()] = exp_k + if func == "inplace_out": + kout, copy = ffunc(k) + assert kout is k + else: + copy = ffunc(k) + assert_array_equal(copy, exp_copy) + assert k is ref_k + assert np.allclose(k, exp_k) + assert np.allclose(a, exp_a) + + @pytest.mark.parametrize("func", ["inplace", "inplace_out"]) + def test_inplace_error(self, func): + ffunc = getattr(self.module, func) + with pytest.raises(ValueError, match="input.*not compatible"): + ffunc(np.array([1 + 1j])) diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py index 07f43e2bcfaa..bb3a5e541859 100644 --- a/numpy/f2py/tests/test_mixed.py +++ b/numpy/f2py/tests/test_mixed.py @@ -2,8 +2,6 @@ import pytest -from numpy.testing import IS_PYPY - from . 
import util @@ -20,8 +18,6 @@ def test_all(self): assert self.module.foo_fixed.bar12() == 12 assert self.module.foo_free.bar13() == 13 - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = bar11() diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 96d5ffc66093..25d9a6778950 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -2,8 +2,6 @@ import pytest -from numpy.testing import IS_PYPY - from . import util @@ -42,7 +40,6 @@ def test_gh26920(self): class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] - @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") def test_module_docstring(self): assert self.module.mod.__doc__ == textwrap.dedent( """\ @@ -61,12 +58,19 @@ class TestModuleAndSubroutine(util.F2PyTest): sources = [ util.getpath("tests", "src", "modules", "gh25337", "data.f90"), util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"), + util.getpath("tests", "src", "regression", "datonly.f90"), ] def test_gh25337(self): self.module.data.set_shift(3) assert "data" in dir(self.module) + def test_allocatable_in_dir(self): + # gh-27696: allocatable arrays should appear in dir() + names = dir(self.module.datonly) + assert "data_array" in names + assert "max_value" in names + @pytest.mark.slow class TestUsedModule(util.F2PyTest): diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index c4636a764914..96d92aaaa292 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -123,7 +123,7 @@ def test_gh26466(self): res = self.module.testsub2() npt.assert_allclose(expected, res) -class TestF90Contiuation(util.F2PyTest): +class TestF90Continuation(util.F2PyTest): # Check that comments are stripped from F90 continuation lines sources = 
[util.getpath("tests", "src", "regression", "f90continuation.f90")] @@ -175,6 +175,23 @@ def test_gh25784(): assert "unknown_subroutine_" in str(rerr) +@pytest.mark.slow +class TestComplexStructCompat(util.F2PyTest): + # Check that .r/.i field access works on complex_double pointers in + # callstatements (scipy compatibility, gh-30966 follow-up) + sources = [ + util.getpath("tests", "src", "regression", "complex_struct_compat.pyf"), + util.getpath("tests", "src", "regression", "complex_struct_compat.f90"), + ] + module_name = "_complex_struct_compat_test" + + def test_complex_struct_field_access(self): + c = np.array([1 + 2j, 3 + 4j, 5 + 6j], dtype=np.complex128) + self.module.zero_imag(c) + npt.assert_array_equal(c.imag, [0.0, 0.0, 0.0]) + npt.assert_array_equal(c.real, [1.0, 3.0, 5.0]) + + @pytest.mark.slow class TestAssignmentOnlyModules(util.F2PyTest): # Ensure that variables are exposed without functions or subroutines in a module diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 35e5d3bd8ac0..220e092ebbad 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -161,7 +161,7 @@ def get_temp_module_name(): # Assume single-threaded, and the module dir usable only by this thread global _module_num get_module_dir() - name = "_test_ext_module_%d" % _module_num + name = f"_test_ext_module_{_module_num}" _module_num += 1 if name in sys.modules: # this should not be possible, but check anyway @@ -358,9 +358,9 @@ def build_meson(source_files, module_name=None, **kwargs): class F2PyTest: code = None sources = None - options = [] - skip = [] - only = [] + options = [] # noqa: RUF012 + skip = [] # noqa: RUF012 + only = [] # noqa: RUF012 suffix = ".f" module = None _has_c_compiler = None diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py index 1e06f6c01a39..1cc7df18fa71 100644 --- a/numpy/f2py/use_rules.py +++ b/numpy/f2py/use_rules.py @@ -47,11 +47,12 @@ def buildusevars(m, r): revmap = {} if 'map' in r: for k in 
r['map'].keys(): - if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( - r['map'][k], k, revmap[r['map'][k]])) + mapped_name = r['map'][k] + if mapped_name in revmap: + outmess(f'\t\t\tVariable "{mapped_name}<={k}" is already mapped by ' + f'"{revmap[mapped_name]}". Skipping.\n') else: - revmap[r['map'][k]] = k + revmap[mapped_name] = k if r.get('only'): for v in r['map'].keys(): if r['map'][v] in m['vars']: @@ -72,8 +73,8 @@ def buildusevars(m, r): def buildusevar(name, realname, vars, usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( - name, realname)) + outmess('\t\t\tConstructing wrapper function for variable ' + f'"{name}=>{realname}"...\n') ret = {} vrd = {'name': name, 'realname': realname, diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 77adeac9207f..b3598534bcdf 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -156,7 +156,7 @@ def fftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=np.float64) >>> fourier = np.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 @@ -215,7 +215,7 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=np.float64) >>> fourier = np.fft.rfft(signal) >>> n = signal.size >>> sample_rate = 100 diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 1ea451ec2eb1..f67737244805 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,44 +1,145 @@ -from typing import Any, Final, Literal as L, TypeVar, overload - -from numpy import complexfloating, floating, generic, integer -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLike, - _ArrayLikeComplex_co, - _ArrayLikeFloat_co, - 
_ShapeLike, -) +from typing import Any, Final, Literal as L, overload -__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _Shape, _ShapeLike -_ScalarT = TypeVar("_ScalarT", bound=generic) +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] ### -integer_types: Final[tuple[type[int], type[integer]]] = ... +type _Device = L["cpu"] + +type _IntLike = int | np.integer + +type _AsFloat64 = np.float64 | np.float32 | np.float16 | np.integer | np.bool +type _AsComplex128 = np.complex128 | np.complex64 +type _Inexact80 = np.longdouble | np.clongdouble + +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _1D = tuple[int] ### +integer_types: Final[tuple[type[int], type[np.integer]]] = ... + +# keep in sync with `ifftshift` below @overload -def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def fftshift[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], + axes: _ShapeLike | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def fftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -# +# keep in sync with `fftshift` above +@overload +def ifftshift[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], + axes: _ShapeLike | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def ifftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -# -@overload -def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... 
-@overload -def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... +# keep in sync with `rfftfreq` below +@overload # 0d +f64 (default) +def fftfreq( + n: _IntLike, + d: _AsFloat64 | float = 1.0, + device: _Device | None = None, +) -> _Array[_1D, np.float64]: ... +@overload # 0d c64 | c128 +def fftfreq( + n: _IntLike, + d: _AsComplex128, + device: _Device | None = None, +) -> _Array[_1D, np.complex128]: ... +@overload # 0d +complex +def fftfreq( + n: _IntLike, + d: complex, + device: _Device | None = None, +) -> _Array[_1D, np.complex128 | Any]: ... +@overload # 0d T: f80 | c160 +def fftfreq[ScalarT: _Inexact80]( + n: _IntLike, + d: ScalarT, + device: _Device | None = None, +) -> _Array[_1D, ScalarT]: ... +@overload # nd +f64 +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsFloat64], + device: _Device | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # nd c64 | c128 +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsComplex128], + device: _Device | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # nd T: f80 | c160 +def fftfreq[ShapeT: _Shape, LongDoubleT: _Inexact80]( + n: _IntLike, + d: _Array[ShapeT, LongDoubleT], + device: _Device | None = None, +) -> _Array[ShapeT, LongDoubleT]: ... +@overload # nd +complex (fallback) +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, np.number | np.bool], + device: _Device | None = None, +) -> _Array[ShapeT, Any]: ... -# -@overload -def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... -@overload -def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... +# keep in sync with `fftfreq` above +@overload # 0d +f64 (default) +def rfftfreq( + n: _IntLike, + d: _AsFloat64 | float = 1.0, + device: _Device | None = None, +) -> _Array[_1D, np.float64]: ... 
+@overload # 0d c64 | c128 +def rfftfreq( + n: _IntLike, + d: _AsComplex128, + device: _Device | None = None, +) -> _Array[_1D, np.complex128]: ... +@overload # 0d +complex +def rfftfreq( + n: _IntLike, + d: complex, + device: _Device | None = None, +) -> _Array[_1D, np.complex128 | Any]: ... +@overload # 0d T: f80 | c160 +def rfftfreq[LongDoubleT: _Inexact80]( + n: _IntLike, + d: LongDoubleT, + device: _Device | None = None, +) -> _Array[_1D, LongDoubleT]: ... +@overload # nd +f64 +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsFloat64], + device: _Device | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # nd c64 | c128 +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsComplex128], + device: _Device | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # nd T: f80 | c160 +def rfftfreq[ShapeT: _Shape, LongDoubleT: _Inexact80]( + n: _IntLike, + d: _Array[ShapeT, LongDoubleT], + device: _Device | None = None, +) -> _Array[ShapeT, LongDoubleT]: ... +@overload # nd +complex (fallback) +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, np.number | np.bool], + device: _Device | None = None, +) -> _Array[ShapeT, Any]: ... 
diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 1ce7c76b8636..90de21607ad2 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -302,7 +302,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): >>> import matplotlib.pyplot as plt >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) + >>> n = np.zeros((400,), dtype=np.complex128) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, label='real') @@ -625,7 +625,7 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): if n is None: n = (a.shape[axis] - 1) * 2 new_norm = _swap_direction(norm) - output = irfft(conjugate(a), n, axis, norm=new_norm, out=None) + output = irfft(conjugate(a), n, axis, norm=new_norm, out=out) return output @@ -1005,7 +1005,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) + >>> n = np.zeros((200,200), dtype=np.complex128) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) @@ -1260,7 +1260,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) """ - return _raw_fftnd(a, s, axes, ifft, norm, out=None) + return _raw_fftnd(a, s, axes, ifft, norm, out=out) @array_function_dispatch(_fftn_dispatcher) @@ -1690,4 +1690,4 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): [3., 3., 3., 3., 3.], [4., 4., 4., 4., 4.]]) """ - return irfftn(a, s, axes, norm, out=None) + return irfftn(a, s, axes, norm, out=out) diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 3234c64ed169..cb7c2a2db8f9 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,8 +1,15 @@ from collections.abc import Sequence -from typing import Literal as L, TypeAlias +from typing import Literal as L, overload -from numpy 
import complex128, float64 -from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co +import numpy as np +from numpy._typing import ( + NDArray, + _ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _Shape, +) +from numpy._typing._array_like import _DualArrayLike __all__ = [ "fft", @@ -21,117 +28,1104 @@ __all__ = [ "ifftn", ] -_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None +type _NormKind = L["backward", "ortho", "forward"] | None +### + +# keep in sync with `ifft` +@overload # Nd complexfloating +def fft[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def fft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def fft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def fft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def fft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def fft( + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... 
+@overload # ?d complexfloating +def fft[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex +def fft( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback def fft( - a: ArrayLike, + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def fft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `fft` +@overload # Nd complexfloating +def ifft[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def ifft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ifft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ifft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... 
+@overload # 1d +complex +def ifft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def ifft( + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def ifft[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex def ifft( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def ifft( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def ifft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `ihfft` +@overload # Nd float64 | +integer +def rfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def rfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... 
+@overload # Nd longdouble +def rfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +float +def rfft( + a: Sequence[float], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float def rfft( - a: ArrayLike, + a: Sequence[Sequence[float]], n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float +def rfft( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def rfft( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def rfft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `hfft` +@overload # Nd floating +def irfft[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def irfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... 
+@overload # Nd complex64 +def irfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def irfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def irfft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +@overload # 2d +complex def irfft( - a: ArrayLike, + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def irfft[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer +def irfft( + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback +def irfft( + a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + out: None = None, +) -> NDArray[np.floating]: ... +@overload # out: +def irfft[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... 
-# Input array must be compatible with `np.conjugate` +# keep in sync with `irfft` above +@overload # Nd floating +def hfft[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def hfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # Nd complex64 +def hfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def hfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def hfft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +@overload # 2d +complex +def hfft( + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def hfft[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer +def hfft( + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.float64]: ... 
+@overload # fallback def hfft( a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + out: None = None, +) -> NDArray[np.floating]: ... +@overload # out: +def hfft[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `rfft` +@overload # Nd float64 | +integer +def ihfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ihfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ihfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +float +def ihfft( + a: Sequence[float], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float +def ihfft( + a: Sequence[Sequence[float]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float def ihfft( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complex128]: ... 
+@overload # fallback +def ihfft( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def ihfft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `ifftn` +@overload # Nd complexfloating +def fftn[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def fftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def fftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def fftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def fftn( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... 
+@overload # 2d +complex +def fftn( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def fftn[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex def fftn( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def fftn( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def fftn[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `fftn` +@overload # Nd complexfloating +def ifftn[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def ifftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... 
+@overload # Nd float32 | float16 +def ifftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ifftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def ifftn( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def ifftn( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def ifftn[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex +def ifftn( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback def ifftn( - a: ArrayLike, + a: _ArrayLikeNumber_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complexfloating]: ... 
+@overload # out: +def ifftn[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd float64 | +integer +def rfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def rfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def rfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +float +def rfftn( + a: Sequence[float], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float +def rfftn( + a: Sequence[Sequence[float]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float +def rfftn( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... 
+@overload # fallback def rfftn( - a: ArrayLike, + a: _ArrayLikeFloat_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def rfftn[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd floating +def irfftn[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def irfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # Nd complex64 +def irfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def irfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def irfftn( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... 
+@overload # 2d +complex +def irfftn( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def irfftn[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer def irfftn( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback +def irfftn( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.floating]: ... +@overload # out: +def irfftn[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `ifft2` +@overload # Nd complexfloating +def fft2[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def fft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... 
+@overload # Nd float32 | float16 +def fft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def fft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def fft2( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex def fft2( - a: ArrayLike, + a: Sequence[Sequence[complex]], s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def fft2[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex +def fft2( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def fft2( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... 
+@overload # out: +def fft2[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `fft2` +@overload # Nd complexfloating +def ifft2[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def ifft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ifft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ifft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def ifft2( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def ifft2( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... 
+@overload # ?d complexfloating +def ifft2[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex def ifft2( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def ifft2( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def ifft2[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd float64 | +integer +def rfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def rfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def rfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... 
+@overload # 1d +float +def rfft2( + a: Sequence[float], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float +def rfft2( + a: Sequence[Sequence[float]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float def rfft2( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def rfft2( + a: _ArrayLikeFloat_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def rfft2[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd floating +def irfft2[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def irfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... 
+@overload # Nd complex64 +def irfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def irfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def irfft2( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +@overload # 2d +complex +def irfft2( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def irfft2[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer def irfft2( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback +def irfft2( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.floating]: ... 
+@overload # out: +def irfft2[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + *, + out: ArrayT, +) -> ArrayT: ... diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 6f26ab6c6d65..f294a26da58e 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -216,6 +216,12 @@ def test_ifft2(self): assert_allclose(np.fft.ifft2(x) * (30. * 20.), np.fft.ifft2(x, norm="forward"), atol=1e-6) + def test_ifft2_out(self): + z = np.array([[1 + 2j, 3 - 4j], [0.5 - 2j, 4 + 1j]]) + out = np.zeros_like(z) + result = np.fft.ifft2(z, out=out) + assert result is out + def test_fftn(self): x = random((30, 20, 10)) + 1j * random((30, 20, 10)) assert_allclose( @@ -298,6 +304,13 @@ def test_irfft2(self): assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), norm="forward"), atol=1e-6) + def test_irfft2_out(self): + z = np.array([[7, 1 + 4j, -5], [2 - 1j, -2 - 1j, -8 + 1j], + [-3, 1 + 2j, 5], [2 + 1j, 4 - 1j, -8 - 1j]]) + out = np.zeros((4, 4), dtype=np.float64) + result = np.fft.irfft2(z, out=out) + assert result is out + def test_rfftn(self): x = random((30, 20, 10)) assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) @@ -338,6 +351,13 @@ def test_hfft(self): assert_allclose(np.fft.hfft(x_herm) / 30., np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + def test_hfft_out(self): + a = np.array([1, 2, 3, 4, 3, 2], dtype=complex) + n = (len(a) - 1) * 2 + out = np.zeros(n, dtype=np.float64) + result = np.fft.hfft(a, n=n, out=out) + assert result is out + def test_ihfft(self): x = random(14) + 1j * random(14) x_herm = np.concatenate((random(1), x, random(1))) diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index a248d048f0ec..e14827b5de37 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -60,16 +60,9 @@ 
def __getattr__(attr): # Warn for deprecated/removed aliases - import math import warnings - if attr == "math": - warnings.warn( - "`np.lib.math` is a deprecated alias for the standard library " - "`math` module (Deprecated Numpy 1.25). Replace usages of " - "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) - return math - elif attr == "emath": + if attr == "emath": raise AttributeError( "numpy.lib.emath was an alias for emath module that was removed " "in NumPy 2.0. Replace usages of numpy.lib.emath with " diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index c3996e1f2b92..25d78c1eb6a6 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -30,7 +30,7 @@ def byte_bounds(a): Examples -------- >>> import numpy as np - >>> I = np.eye(2, dtype='f'); I.dtype + >>> I = np.eye(2, dtype=np.float32); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index e7aacea43254..1d41ee58fa47 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,20 +1,10 @@ -from typing import ( - Any, - Literal as L, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Literal as L, Protocol, overload, type_check_only -from numpy import generic -from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt, _Shape __all__ = ["pad"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - @type_check_only class _ModeFunc(Protocol): def __call__( @@ -26,7 +16,7 @@ class _ModeFunc(Protocol): /, ) -> None: ... -_ModeKind: TypeAlias = L[ +type _ModeKind = L[ "constant", "edge", "linear_ramp", @@ -40,19 +30,33 @@ _ModeKind: TypeAlias = L[ "empty", ] -# TODO: In practice each keyword argument is exclusive to one or more -# specific modes. 
Consider adding more overloads to express this in the future. - -_PadWidth: TypeAlias = ( +type _PadWidth = ( _ArrayLikeInt | dict[int, int] | dict[int, tuple[int, int]] | dict[int, int | tuple[int, int]] ) + +### + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. + # Expand `**kwargs` into explicit keyword-only arguments @overload -def pad( - array: _ArrayLike[_ScalarT], +def pad[ShapeT: _Shape, DTypeT: np.dtype]( + array: np.ndarray[ShapeT, DTypeT], + pad_width: _PadWidth, + mode: _ModeKind = "constant", + *, + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], pad_width: _PadWidth, mode: _ModeKind = "constant", *, @@ -60,7 +64,7 @@ def pad( constant_values: ArrayLike = 0, end_values: ArrayLike = 0, reflect_type: L["odd", "even"] = "even", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def pad( array: ArrayLike, @@ -73,12 +77,19 @@ def pad( reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload -def pad( - array: _ArrayLike[_ScalarT], +def pad[ShapeT: _Shape, DTypeT: np.dtype]( + array: np.ndarray[ShapeT, DTypeT], + pad_width: _PadWidth, + mode: _ModeFunc, + **kwargs: Any, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def pad( array: ArrayLike, diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index da687da03dde..77db8c4f3620 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,13 +1,5 @@ -from typing import ( - Any, - Generic, - Literal as L, - NamedTuple, - SupportsIndex, - TypeAlias, - overload, -) -from typing_extensions import TypeVar +from _typeshed import Incomplete +from typing import Any, Literal as L, NamedTuple, SupportsIndex, TypeVar, overload import numpy as np from numpy._typing import ( @@ -32,16 +24,13 @@ __all__ = [ "unique_values", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) - # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_EitherSCT = TypeVar( - "_EitherSCT", +_AnyScalarT = TypeVar( + "_AnyScalarT", np.bool, np.int8, np.int16, np.int32, np.int64, np.intp, np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, @@ -52,55 +41,58 @@ _EitherSCT = TypeVar( np.integer, np.floating, np.complexfloating, np.character, ) # fmt: skip -_AnyArray: TypeAlias = NDArray[Any] -_IntArray: TypeAlias = NDArray[np.intp] +type _NumericScalar = np.number | np.timedelta64 | np.object_ +type _IntArray = NDArray[np.intp] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +type _IntersectResult[ScalarT: np.generic] = tuple[_Array1D[ScalarT], _Array1D[np.intp], _Array1D[np.intp]] ### -class UniqueAllResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] - indices: _IntArray +class UniqueAllResult[ScalarT: np.generic](NamedTuple): + values: _Array1D[ScalarT] + indices: _Array1D[np.intp] inverse_indices: _IntArray - counts: _IntArray + counts: 
_Array1D[np.intp] -class UniqueCountsResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] - counts: _IntArray +class UniqueCountsResult[ScalarT: np.generic](NamedTuple): + values: _Array1D[ScalarT] + counts: _Array1D[np.intp] -class UniqueInverseResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] - inverse_indices: _IntArray +class UniqueInverseResult[ScalarT: np.generic](NamedTuple): + values: _Array1D[ScalarT] + inverse_indices: NDArray[np.intp] -# +# keep in sync with `ma.extras.ediff1d` @overload def ediff1d( ary: _ArrayLikeBool_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[np.int8]: ... +) -> _Array1D[np.int8]: ... @overload -def ediff1d( - ary: _ArrayLike[_NumericT], +def ediff1d[NumericT: _NumericScalar]( + ary: _ArrayLike[NumericT], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[_NumericT]: ... +) -> _Array1D[NumericT]: ... @overload def ediff1d( ary: _ArrayLike[np.datetime64[Any]], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[np.timedelta64]: ... +) -> _Array1D[np.timedelta64]: ... @overload def ediff1d( ary: _ArrayLikeNumber_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> _AnyArray: ... +) -> _Array1D[Incomplete]: ... # @overload # known scalar-type, FFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, return_counts: L[False] = False, @@ -108,7 +100,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # unknown scalar-type, FFF def unique( ar: ArrayLike, @@ -119,10 +111,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> _AnyArray: ... +) -> np.ndarray: ... 
@overload # known scalar-type, TFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, return_counts: L[False] = False, @@ -130,7 +122,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( ar: ArrayLike, @@ -141,10 +133,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FTF (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[False] = False, @@ -152,10 +144,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FTF (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], @@ -163,7 +155,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FTF (positional) def unique( ar: ArrayLike, @@ -174,7 +166,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FTF (keyword) def unique( ar: ArrayLike, @@ -185,10 +177,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... 
@overload # known scalar-type, FFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[False], return_counts: L[True], @@ -196,10 +188,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, *, @@ -207,7 +199,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FFT (positional) def unique( ar: ArrayLike, @@ -218,7 +210,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FFT (keyword) def unique( ar: ArrayLike, @@ -229,10 +221,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, TTF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[False] = False, @@ -240,7 +232,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTF def unique( ar: ArrayLike, @@ -251,10 +243,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... 
@overload # known scalar-type, TFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False], return_counts: L[True], @@ -262,10 +254,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, *, @@ -273,7 +265,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (positional) def unique( ar: ArrayLike, @@ -284,7 +276,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (keyword) def unique( ar: ArrayLike, @@ -295,10 +287,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[True], @@ -306,10 +298,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... 
@overload # known scalar-type, FTT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], @@ -317,7 +309,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (positional) def unique( ar: ArrayLike, @@ -328,7 +320,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (keyword) def unique( ar: ArrayLike, @@ -339,10 +331,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, TTT -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[True], @@ -350,7 +342,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTT def unique( ar: ArrayLike, @@ -361,69 +353,71 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray, _IntArray]: ... # @overload -def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +def unique_all[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueAllResult[ScalarT]: ... @overload def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... 
# @overload -def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +def unique_counts[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueCountsResult[ScalarT]: ... @overload def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... # @overload -def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +def unique_inverse[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueInverseResult[ScalarT]: ... @overload def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... # @overload -def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload -def unique_values(x: ArrayLike) -> _AnyArray: ... +def unique_values(x: ArrayLike) -> _Array1D[Incomplete]: ... + +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication # @overload # known scalar-type, return_indices=False (default) -def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, return_indices: L[False] = False, -) -> NDArray[_EitherSCT]: ... +) -> _Array1D[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (positional) -def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> _IntersectResult[_AnyScalarT]: ... 
@overload # known scalar-type, return_indices=True (keyword) -def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> _IntersectResult[_AnyScalarT]: ... @overload # unknown scalar-type, return_indices=False (default) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, return_indices: L[False] = False, -) -> _AnyArray: ... +) -> _Array1D[Incomplete]: ... @overload # unknown scalar-type, return_indices=True (positional) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> _IntersectResult[Incomplete]: ... @overload # unknown scalar-type, return_indices=True (keyword) def intersect1d( ar1: ArrayLike, @@ -431,25 +425,29 @@ def intersect1d( assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> _IntersectResult[Incomplete]: ... # @overload -def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setxor1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _Array1D[_AnyScalarT]: ... @overload -def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... # @overload -def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _Array1D[_AnyScalarT]: ... # noqa: UP047 @overload -def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... 
+def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _Array1D[Incomplete]: ... # @overload -def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setdiff1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _Array1D[_AnyScalarT]: ... @overload -def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... # def isin( diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index 5fd589a3ac36..2d221f9007e9 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -2,7 +2,7 @@ from collections.abc import Generator from types import EllipsisType -from typing import Any, Final, TypeAlias, overload +from typing import Any, Final, overload from typing_extensions import TypeVar import numpy as np @@ -10,12 +10,11 @@ from numpy._typing import _AnyShape, _Shape __all__ = ["Arrayterator"] +# Type parameter default syntax (PEP 696) requires Python 3.13+ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] +type _AnyIndex = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has @@ -29,17 +28,17 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): step: Final[list[int]] @property # type: ignore[misc] - def shape(self) -> _ShapeT_co: ... + def shape(self) -> _ShapeT_co: ... 
# pyrefly: ignore[bad-override] @property - def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + def flat[ScalarT: np.generic](self: Arrayterator[Any, np.dtype[ScalarT]]) -> Generator[ScalarT]: ... # type: ignore[override] # def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] - def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... # pyrefly: ignore[bad-override] # - @overload # type: ignore[override] + @overload def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, dtype: DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, DTypeT]: ... diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 72398c5479f8..1c9331fe553a 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -155,7 +155,7 @@ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): """ Open `path` with `mode` and return the file object. - If ``path`` is an URL, it will be downloaded, stored in the + If ``path`` is a URL, it will be downloaded, stored in the `DataSource` `destpath` directory and opened from there. Parameters @@ -340,7 +340,7 @@ def _cache(self, path): def _findfile(self, path): """Searches for ``path`` and returns full path if found. - If path is an URL, _findfile will cache a local copy and return the + If path is a URL, _findfile will cache a local copy and return the path to the cached file. If path is a local file, _findfile will return a path to that local file. 
@@ -372,7 +372,7 @@ def abspath(self, path): """ Return absolute path of file in the DataSource directory. - If `path` is an URL, then `abspath` will return either the location + If `path` is a URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. @@ -448,7 +448,7 @@ def exists(self, path): Notes ----- - When `path` is an URL, `exists` will return True if it's either + When `path` is a URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. @@ -484,7 +484,7 @@ def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object. - If `path` is an URL, it will be downloaded, stored in the + If `path` is a URL, it will be downloaded, stored in the `DataSource` directory and opened from there. Parameters @@ -594,7 +594,7 @@ def abspath(self, path): """ Return absolute path of file in the Repository directory. - If `path` is an URL, then `abspath` will return either the location + If `path` is a URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. @@ -639,7 +639,7 @@ def exists(self, path): Notes ----- - When `path` is an URL, `exists` will return True if it's either + When `path` is a URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. @@ -651,7 +651,7 @@ def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object prepending Repository base URL. 
- If `path` is an URL, it will be downloaded, stored in the + If `path` is a URL, it will be downloaded, stored in the DataSource directory and opened from there. Parameters diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index dba0434a5fab..33af9cf1b197 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,8 @@ from _typeshed import OpenBinaryMode, OpenTextMode from pathlib import Path -from typing import IO, Any, TypeAlias +from typing import IO, Any -_Mode: TypeAlias = OpenBinaryMode | OpenTextMode +type _Mode = OpenBinaryMode | OpenTextMode ### diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 2bb557709c8b..51b16ce0de48 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -838,9 +838,11 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, array = pickle.load(fp, **pickle_kwargs) except UnicodeError as err: # Friendlier error message - raise UnicodeError("Unpickling a python object failed: %r\n" - "You may need to pass the encoding= option " - "to numpy.load" % (err,)) from err + raise UnicodeError( + f"Unpickling a python object failed: {err!r}\n" + "You may need to pass the encoding= option " + "to numpy.load" + ) from err else: if isfileobj(fp): # We can use the fast fromfile() function. 
diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index b45df02796d7..f8b9a7ab88a9 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,6 +1,6 @@ import os from _typeshed import SupportsRead, SupportsWrite -from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard +from typing import Any, BinaryIO, Final, TypeGuard import numpy as np import numpy.typing as npt @@ -8,7 +8,7 @@ from numpy.lib._utils_impl import drop_metadata as drop_metadata __all__: list[str] = [] -_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] +type _DTypeDescr = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] ### diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 87a834850fda..0d1b9a0331cf 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -81,7 +81,7 @@ # When the sample contains exactly the percentile wanted, the virtual_index is # an integer to the index of this element. 
# When the percentile wanted is in between two elements, the virtual_index -# is made of a integer part (a.k.a 'i' or 'left') and a fractional part +# is made of an integer part (a.k.a 'i' or 'left') and a fractional part # (a.k.a 'g' or 'gamma') # # Each method in _QuantileMethods has two properties @@ -93,7 +93,7 @@ # --- HYNDMAN and FAN METHODS # Discrete methods 'inverted_cdf': { - 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), # noqa: PLW0108 + 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), 'fix_gamma': None, # should never be called }, 'averaged_inverted_cdf': { @@ -105,7 +105,7 @@ where=gamma == 0), }, 'closest_observation': { - 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), # noqa: PLW0108 + 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), 'fix_gamma': None, # should never be called }, # Continuous methods @@ -594,7 +594,7 @@ def average(a, axis=None, weights=None, returned=False, *, if returned: if scl.shape != avg_as_array.shape: - scl = np.broadcast_to(scl, avg_as_array.shape).copy() + scl = np.broadcast_to(scl, avg_as_array.shape, subok=True).copy() return avg, scl else: return avg @@ -654,7 +654,7 @@ class ndarray is returned. ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) + >>> np.asarray_chkfinite(a, dtype=np.float64) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. @@ -823,7 +823,7 @@ def select(condlist, choicelist, default=0): choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. - default : scalar, optional + default : array_like, optional The element inserted in `output` when all conditions evaluate to False. Returns @@ -1017,7 +1017,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Spacing between f values. Default unitary spacing for all dimensions. 
Spacing can be specified using: - 1. single scalar to specify a sample distance for all dimensions. + 1. Single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each @@ -1033,7 +1033,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. axis : None or int or tuple of ints, optional - Gradient is calculated only along the given axis or axes + Gradient is calculated only along the given axis or axes. The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. @@ -1956,7 +1956,7 @@ def trim_zeros(filt, trim='fb', axis=None): returned that still contains all values which are not zero. If an axis is specified, `filt` will be sliced in that dimension only on the sides specified by `trim`. The remaining area will be the - smallest that still contains all values wich are not zero. + smallest that still contains all values which are not zero. .. versionadded:: 2.2.0 @@ -2114,11 +2114,11 @@ def place(arr, mask, vals): arr : ndarray Array to put data into. mask : array_like - Boolean mask array. Must have the same size as `a`. + Boolean mask array. Must have the same size as `arr`. vals : 1-D sequence - Values to put into `a`. Only the first N elements are used, where + Values to put into `arr`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller - than N, it will be repeated, and if elements of `a` are to be masked, + than N, it will be repeated, and if elements of `arr` are to be masked, this sequence must be non-empty. 
See Also @@ -2190,17 +2190,17 @@ def _update_dim_sizes(dim_sizes, arg, core_dims): num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( - '%d-dimensional argument does not have enough ' - 'dimensions for all core dimensions %r' - % (arg.ndim, core_dims)) + f'{arg.ndim}-dimensional argument does not have enough ' + f'dimensions for all core dimensions {core_dims!r}') core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( - 'inconsistent size for core dimension %r: %r vs %r' - % (dim, size, dim_sizes[dim])) + f'inconsistent size for core dimension {dim!r}: {size!r} vs ' + f'{dim_sizes[dim]!r}' + ) else: dim_sizes[dim] = size @@ -2610,9 +2610,10 @@ def _vectorize_call_with_signature(self, func, args): input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): - raise TypeError('wrong number of positional arguments: ' - 'expected %r, got %r' - % (len(input_core_dims), len(args))) + raise TypeError( + 'wrong number of positional arguments: ' + f'expected {len(input_core_dims)!r}, got {len(args)!r}' + ) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( @@ -2633,8 +2634,9 @@ def _vectorize_call_with_signature(self, func, args): if nout != n_results: raise ValueError( - 'wrong number of outputs from pyfunc: expected %r, got %r' - % (nout, n_results)) + f'wrong number of outputs from pyfunc: expected {nout!r}, ' + f'got {n_results!r}' + ) if nout == 1: results = (results,) @@ -3071,15 +3073,14 @@ def blackman(M): "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. + as the Kaiser window. References ---------- - Blackman, R.B. 
and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + .. [1] Blackman, R.B. and Tukey, J.W., (1958) + The measurement of power spectra, Dover Publications, New York. + .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. Examples -------- @@ -3857,13 +3858,21 @@ def _ureduce(a, func, keepdims=False, **kwargs): if len(axis) == 1: kwargs['axis'] = axis[0] else: - keep = set(range(nd)) - set(axis) + keep = sorted(set(range(nd)) - set(axis)) nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) + + def reshape_arr(a): + # move axis that should not be reduced to front + a = np.moveaxis(a, keep, range(nkeep)) + # merge reduced axis + return a.reshape(a.shape[:nkeep] + (-1,)) + + a = reshape_arr(a) + + weights = kwargs.get("weights") + if weights is not None: + kwargs["weights"] = reshape_arr(weights) + kwargs['axis'] = -1 elif keepdims and out is not None: index_out = (0, ) * nd @@ -4219,6 +4228,7 @@ def percentile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") + weak_q = type(q) in (int, float) # use weak promotion for final result type q = np.true_divide(q, 100, out=...) 
if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4235,7 +4245,7 @@ def percentile(a, raise ValueError("Weights must be non-negative.") return _quantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, @@ -4467,6 +4477,7 @@ def quantile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") + weak_q = type(q) in (int, float) # use weak promotion for final result type q = np.asanyarray(q) if not _quantile_is_valid(q): @@ -4484,7 +4495,7 @@ def quantile(a, raise ValueError("Weights must be non-negative.") return _quantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _quantile_unchecked(a, @@ -4494,7 +4505,8 @@ def _quantile_unchecked(a, overwrite_input=False, method="linear", keepdims=False, - weights=None): + weights=None, + weak_q=False): """Assumes that q is in [0, 1], and is an ndarray""" return _ureduce(a, func=_quantile_ureduce_func, @@ -4504,7 +4516,8 @@ def _quantile_unchecked(a, axis=axis, out=out, overwrite_input=overwrite_input, - method=method) + method=method, + weak_q=weak_q) def _quantile_is_valid(q): @@ -4543,7 +4556,7 @@ def _compute_virtual_index(n, quantiles, alpha: float, beta: float): ) - 1 -def _get_gamma(virtual_indexes, previous_indexes, method, dtype): +def _get_gamma(virtual_indexes, previous_indexes, method): """ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. @@ -4564,7 +4577,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method, dtype): gamma = method["fix_gamma"](gamma, virtual_indexes) # Ensure both that we have an array, and that we keep the dtype # (which may have been matched to the input array). 
- return np.asanyarray(gamma, dtype=dtype) + return np.asanyarray(gamma, dtype=virtual_indexes.dtype) def _lerp(a, b, t, out=None): @@ -4632,6 +4645,7 @@ def _quantile_ureduce_func( out: np.ndarray | None = None, overwrite_input: bool = False, method: str = "linear", + weak_q: bool = False, ) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful @@ -4658,7 +4672,8 @@ def _quantile_ureduce_func( axis=axis, method=method, out=out, - weights=wgt) + weights=wgt, + weak_q=weak_q) return result @@ -4704,11 +4719,12 @@ def _quantile( method: str = "linear", out: np.ndarray | None = None, weights: "np.typing.ArrayLike | None" = None, + weak_q: bool = False, ) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanpercentile for parameter usage + These methods are extended to this function using _ureduce. + See nanpercentile for parameter usage. It computes the quantiles of the array for the given axis. A linear interpolation is performed based on the `method`. @@ -4782,18 +4798,13 @@ def _quantile( previous = arr[previous_indexes] next = arr[next_indexes] # --- Linear interpolation - if arr.dtype.kind in "iu": - gtype = None - elif arr.dtype.kind == "f": - # make sure the return value matches the input array type - gtype = arr.dtype - else: - gtype = virtual_indexes.dtype - gamma = _get_gamma(virtual_indexes, previous_indexes, - method_props, gtype) - result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) - gamma = gamma.reshape(result_shape) + method_props) + if weak_q: + gamma = float(gamma) + else: + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) result = _lerp(previous, next, gamma, @@ -5059,7 +5070,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): ``(1, ..., 1, Ni, 1, ..., 1)``. 
These sparse coordinate grids are intended to be used with :ref:`basics.broadcasting`. When all coordinates are used in an expression, broadcasting still leads to a - fully-dimensonal result array. + fully-dimensional result array. Default is False. @@ -5175,8 +5186,8 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if indexing == 'xy' and ndim > 1: # switch first and second axis - output[0].shape = (1, -1) + s0[2:] - output[1].shape = (-1, 1) + s0[2:] + output[0] = output[0].reshape((1, -1) + s0[2:]) + output[1] = output[1].reshape((-1, 1) + s0[2:]) if not sparse: # Return the full N-D matrix (not only the 1-D vector) @@ -5185,6 +5196,9 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if copy: output = tuple(x.copy() for x in output) + if sparse and not copy: + return tuple(output) + return output @@ -5231,7 +5245,7 @@ def delete(arr, obj, axis=None): Often it is preferable to use a boolean mask. For example: >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) + >>> mask = np.ones(len(arr), dtype=np.bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] 
@@ -5614,7 +5628,7 @@ def append(arr, values, axis=None): the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s) - >>> a = np.array([1, 2], dtype=int) + >>> a = np.array([1, 2], dtype=np.int_) >>> c = np.append(a, []) >>> c array([1., 2.]) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index d68918560b69..5887d7d496ce 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -5,15 +5,13 @@ from typing import ( Concatenate, Literal as L, Never, - ParamSpec, Protocol, SupportsIndex, SupportsInt, - TypeAlias, overload, type_check_only, ) -from typing_extensions import TypeIs, TypeVar +from typing_extensions import TypeIs import numpy as np from numpy import _OrderKACF @@ -36,6 +34,7 @@ from numpy._typing import ( _NestedSequence as _SeqND, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, _SupportsArray, ) @@ -80,39 +79,15 @@ __all__ = [ "quantile", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -# The `{}ss` suffix refers to the PEP 695 (Python 3.12) `ParamSpec` syntax, `**P`. 
-_Tss = ParamSpec("_Tss") +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) -_FloatingT = TypeVar("_FloatingT", bound=np.floating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_InexactTimeT = TypeVar("_InexactTimeT", bound=np.inexact | np.timedelta64) -_InexactDateTimeT = TypeVar("_InexactDateTimeT", bound=np.inexact | np.timedelta64 | np.datetime64) -_ScalarNumericT = TypeVar("_ScalarNumericT", bound=np.inexact | np.timedelta64 | np.object_) -_AnyDoubleT = TypeVar("_AnyDoubleT", bound=np.float64 | np.longdouble | np.complex128 | np.clongdouble) - -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_ArrayFloatingT = TypeVar("_ArrayFloatingT", bound=NDArray[np.floating]) -_ArrayFloatObjT = TypeVar("_ArrayFloatObjT", bound=NDArray[np.floating | np.object_]) -_ArrayComplexT = TypeVar("_ArrayComplexT", bound=NDArray[np.complexfloating]) -_ArrayInexactT = TypeVar("_ArrayInexactT", bound=NDArray[np.inexact]) -_ArrayNumericT = TypeVar("_ArrayNumericT", bound=NDArray[np.inexact | np.timedelta64 | np.object_]) - -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] - -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) - -_integer_co: TypeAlias = np.integer | np.bool -_float64_co: TypeAlias = np.float64 | _integer_co -_floating_co: TypeAlias = np.floating | _integer_co +type _integer_co = np.integer | np.bool +type _float64_co = np.float64 | _integer_co +type _floating_co = np.floating | _integer_co # non-trivial scalar-types that will become `complex128` in `sort_complex()`, # i.e. 
all numeric scalar types except for `[u]int{8,16} | longdouble` -_SortsToComplex128: TypeAlias = ( +type _SortsToComplex128 = ( np.bool | np.int32 | np.uint32 @@ -124,33 +99,37 @@ _SortsToComplex128: TypeAlias = ( | np.timedelta64 | np.object_ ) +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble -_Array: TypeAlias = np.ndarray[_ShapeT, np.dtype[_ScalarT]] -_Array0D: TypeAlias = np.ndarray[tuple[()], np.dtype[_ScalarT]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] -_ArrayMax2D: TypeAlias = np.ndarray[tuple[int] | tuple[int, int], np.dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _ArrayMax2D[ScalarT: np.generic] = np.ndarray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] # workaround for mypy and pyright not following the typing spec for overloads -_ArrayNoD: TypeAlias = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[_ScalarT]] +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]] -_Seq1D: TypeAlias = Sequence[_T] -_Seq2D: TypeAlias = Sequence[Sequence[_T]] -_Seq3D: TypeAlias = Sequence[Sequence[Sequence[_T]]] -_ListSeqND: TypeAlias = list[_T] | _SeqND[list[_T]] +type _Seq1D[T] = Sequence[T] +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[Sequence[Sequence[T]]] +type _ListSeqND[T] = list[T] | _SeqND[list[T]] -_Tuple2: TypeAlias = 
tuple[_T, _T] -_Tuple3: TypeAlias = tuple[_T, _T, _T] -_Tuple4: TypeAlias = tuple[_T, _T, _T, _T] +type _Tuple2[T] = tuple[T, T] +type _Tuple3[T] = tuple[T, T, T] +type _Tuple4[T] = tuple[T, T, T, T] -_Mesh1: TypeAlias = tuple[_Array1D[_ScalarT]] -_Mesh2: TypeAlias = tuple[_Array2D[_ScalarT], _Array2D[_ScalarT1]] -_Mesh3: TypeAlias = tuple[_Array3D[_ScalarT], _Array3D[_ScalarT1], _Array3D[_ScalarT2]] +type _Mesh1[ScalarT: np.generic] = tuple[_Array1D[ScalarT]] +type _Mesh2[ScalarT: np.generic, ScalarT1: np.generic] = tuple[_Array2D[ScalarT], _Array2D[ScalarT1]] +type _Mesh3[ScalarT: np.generic, ScalarT1: np.generic, ScalarT2: np.generic] = tuple[ + _Array3D[ScalarT], _Array3D[ScalarT1], _Array3D[ScalarT2] +] -_IndexLike: TypeAlias = slice | _ArrayLikeInt_co +type _IndexLike = slice | _ArrayLikeInt_co -_Indexing: TypeAlias = L["ij", "xy"] -_InterpolationMethod = L[ +type _Indexing = L["ij", "xy"] +type _InterpolationMethod = L[ "inverted_cdf", "averaged_inverted_cdf", "closest_observation", @@ -168,31 +147,31 @@ _InterpolationMethod = L[ # The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so in can # return any (usually 1d) array-like or scalar-like compatible with the input. -_PiecewiseFunction: TypeAlias = Callable[Concatenate[NDArray[_ScalarT], _Tss], ArrayLike] -_PiecewiseFunctions: TypeAlias = _SizedIterable[_PiecewiseFunction[_ScalarT, _Tss] | _ScalarLike_co] +type _PiecewiseFunction[ScalarT: np.generic, **Tss] = Callable[Concatenate[NDArray[ScalarT], Tss], ArrayLike] +type _PiecewiseFunctions[ScalarT: np.generic, **Tss] = _SizedIterable[_PiecewiseFunction[ScalarT, Tss] | _ScalarLike_co] @type_check_only -class _TrimZerosSequence(Protocol[_T_co]): +class _TrimZerosSequence[T](Protocol): def __len__(self, /) -> int: ... @overload def __getitem__(self, key: int, /) -> object: ... @overload - def __getitem__(self, key: slice, /) -> _T_co: ... + def __getitem__(self, key: slice, /) -> T: ... 
@type_check_only -class _SupportsRMulFloat(Protocol[_T_co]): - def __rmul__(self, other: float, /) -> _T_co: ... +class _SupportsRMulFloat[T](Protocol): + def __rmul__(self, other: float, /) -> T: ... @type_check_only -class _SizedIterable(Protocol[_T_co]): - def __iter__(self) -> Iterable[_T_co]: ... +class _SizedIterable[T](Protocol): + def __iter__(self) -> Iterable[T]: ... def __len__(self) -> int: ... ### class vectorize: __doc__: str | None - __module__: L["numpy"] = "numpy" + __module__: L["numpy"] = "numpy" # pyrefly: ignore[bad-override] pyfunc: Callable[..., Incomplete] cache: bool signature: str | None @@ -212,18 +191,18 @@ class vectorize: def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... @overload -def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... +def rot90[ArrayT: np.ndarray](m: ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> ArrayT: ... @overload -def rot90(m: _ArrayLike[_ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[_ScalarT]: ... +def rot90[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[ScalarT]: ... @overload def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... # NOTE: Technically `flip` also accept scalars, but that has no effect and complicates # the overloads significantly, so we ignore that case here. @overload -def flip(m: _ArrayT, axis: int | tuple[int, ...] | None = None) -> _ArrayT: ... +def flip[ArrayT: np.ndarray](m: ArrayT, axis: int | tuple[int, ...] | None = None) -> ArrayT: ... @overload -def flip(m: _ArrayLike[_ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[_ScalarT]: ... +def flip[ScalarT: np.generic](m: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[ScalarT]: ... @overload def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... 
@@ -235,77 +214,77 @@ def iterable(y: object) -> TypeIs[Iterable[Any]]: ... # NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will # therefore always return an array. @overload # inexact array, keepdims=True -def average( - a: _ArrayInexactT, +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[True], -) -> _ArrayInexactT: ... +) -> ArrayT: ... @overload # inexact array, returned=True keepdims=True -def average( - a: _ArrayInexactT, +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[True], -) -> _Tuple2[_ArrayInexactT]: ... +) -> _Tuple2[ArrayT]: ... @overload # inexact array-like, axis=None -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> _InexactT: ... +) -> ScalarT: ... @overload # inexact array-like, axis= -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...], weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> NDArray[_InexactT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact array-like, keepdims=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[True], -) -> NDArray[_InexactT]: ... +) -> NDArray[ScalarT]: ... 
@overload # inexact array-like, axis=None, returned=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _Tuple2[_InexactT]: ... +) -> _Tuple2[ScalarT]: ... @overload # inexact array-like, axis=, returned=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...], weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _Tuple2[NDArray[_InexactT]]: ... +) -> _Tuple2[NDArray[ScalarT]]: ... @overload # inexact array-like, returned=True, keepdims=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[True], -) -> _Tuple2[NDArray[_InexactT]]: ... +) -> _Tuple2[NDArray[ScalarT]]: ... @overload # bool or integer array-like, axis=None def average( a: _SeqND[float] | _ArrayLikeInt_co, @@ -471,15 +450,19 @@ def average( # @overload -def asarray_chkfinite(a: _ArrayT, dtype: None = None, order: _OrderKACF = None) -> _ArrayT: ... +def asarray_chkfinite[ArrayT: np.ndarray](a: ArrayT, dtype: None = None, order: _OrderKACF = None) -> ArrayT: ... @overload -def asarray_chkfinite( - a: np.ndarray[_ShapeT], dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None -) -> _Array[_ShapeT, _ScalarT]: ... +def asarray_chkfinite[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> _Array[ShapeT, ScalarT]: ... @overload -def asarray_chkfinite(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF = None) -> NDArray[_ScalarT]: ... 
+def asarray_chkfinite[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = None +) -> NDArray[ScalarT]: ... @overload -def asarray_chkfinite(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +def asarray_chkfinite[ScalarT: np.generic]( + a: object, dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> NDArray[ScalarT]: ... @overload def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... @@ -488,33 +471,33 @@ def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKA # practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any # array-like. @overload -def piecewise( - x: _Array[_ShapeT, _ScalarT], +def piecewise[ShapeT: _Shape, ScalarT: np.generic, **Tss]( + x: _Array[ShapeT, ScalarT], condlist: ArrayLike, - funclist: _PiecewiseFunctions[Any, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> _Array[_ShapeT, _ScalarT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> _Array[ShapeT, ScalarT]: ... @overload -def piecewise( - x: _ArrayLike[_ScalarT], +def piecewise[ScalarT: np.generic, **Tss]( + x: _ArrayLike[ScalarT], condlist: ArrayLike, - funclist: _PiecewiseFunctions[Any, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> NDArray[_ScalarT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def piecewise( +def piecewise[ScalarT: np.generic, **Tss]( x: ArrayLike, condlist: ArrayLike, - funclist: _PiecewiseFunctions[_ScalarT, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> NDArray[_ScalarT]: ... + funclist: _PiecewiseFunctions[ScalarT, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... 
# NOTE: condition is usually boolean, but anything with zero/non-zero semantics works @overload -def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> _Array1D[_ScalarT]: ... +def extract[ScalarT: np.generic](condition: ArrayLike, arr: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... @overload @@ -533,64 +516,64 @@ def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... # NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an # error at runtime @overload -def select( +def select[ArrayT: np.ndarray]( condlist: _SizedIterable[_ArrayLikeBool_co], - choicelist: Sequence[_ArrayT], - default: _ScalarLike_co = 0, -) -> _ArrayT: ... + choicelist: Sequence[ArrayT], + default: ArrayLike = 0, +) -> ArrayT: ... @overload -def select( +def select[ScalarT: np.generic]( condlist: _SizedIterable[_ArrayLikeBool_co], - choicelist: Sequence[_ArrayLike[_ScalarT]] | NDArray[_ScalarT], - default: _ScalarLike_co = 0, -) -> NDArray[_ScalarT]: ... + choicelist: Sequence[_ArrayLike[ScalarT]] | NDArray[ScalarT], + default: ArrayLike = 0, +) -> NDArray[ScalarT]: ... @overload def select( condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayLike], - default: _ScalarLike_co = 0, + default: ArrayLike = 0, ) -> np.ndarray: ... # keep roughly in sync with `ma.core.copy` @overload -def copy(a: _ArrayT, order: _OrderKACF, subok: L[True]) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF, subok: L[True]) -> ArrayT: ... @overload -def copy(a: _ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> ArrayT: ... @overload -def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[_ScalarT]: ... 
+def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[ScalarT]: ... @overload def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... # @overload # ?d, known inexact scalar-type -def gradient( - f: _ArrayNoD[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _ArrayNoD[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors -) -> _Array1D[_InexactTimeT] | Any: ... +) -> _Array1D[ScalarT] | Any: ... @overload # 1d, known inexact scalar-type -def gradient( - f: _Array1D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array1D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Array1D[_InexactTimeT]: ... +) -> _Array1D[ScalarT]: ... @overload # 2d, known inexact scalar-type -def gradient( - f: _Array2D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array2D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Mesh2[_InexactTimeT, _InexactTimeT]: ... +) -> _Mesh2[ScalarT, ScalarT]: ... @overload # 3d, known inexact scalar-type -def gradient( - f: _Array3D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array3D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Mesh3[_InexactTimeT, _InexactTimeT, _InexactTimeT]: ... +) -> _Mesh3[ScalarT, ScalarT, ScalarT]: ... @overload # ?d, datetime64 scalar-type def gradient( f: _ArrayNoD[np.datetime64], @@ -669,39 +652,39 @@ def gradient( edge_order: L[1, 2] = 1, ) -> Incomplete: ... 
-# +# keep in sync with `ma.core.diff` @overload # n == 0; return input unchanged -def diff( - a: _T, +def diff[T]( + a: T, n: L[0], axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., # = _NoValue append: ArrayLike | _NoValueType = ..., # = _NoValue -) -> _T: ... +) -> T: ... @overload # known array-type -def diff( - a: _ArrayNumericT, +def diff[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> _ArrayNumericT: ... +) -> ArrayT: ... @overload # known shape, datetime64 -def diff( - a: _Array[_ShapeT, np.datetime64], +def diff[ShapeT: _Shape]( + a: _Array[ShapeT, np.datetime64], n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> _Array[_ShapeT, np.timedelta64]: ... +) -> _Array[ShapeT, np.timedelta64]: ... @overload # unknown shape, known scalar-type -def diff( - a: _ArrayLike[_ScalarNumericT], +def diff[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # unknown shape, datetime64 def diff( a: _ArrayLike[np.datetime64], @@ -787,23 +770,23 @@ def interp( period: _FloatLike_co | None = None, ) -> np.complex128: ... @overload # float array -def interp( - x: _Array[_ShapeT, _floating_co], +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... 
@overload # complex array -def interp( - x: _Array[_ShapeT, _floating_co], +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # float sequence def interp( x: _Seq1D[_FloatLike_co], @@ -870,7 +853,7 @@ def interp( # @overload # 0d T: floating -> 0d T -def angle(z: _FloatingT, deg: bool = False) -> _FloatingT: ... +def angle[FloatingT: np.floating](z: FloatingT, deg: bool = False) -> FloatingT: ... @overload # 0d complex | float | ~integer -> 0d float64 def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... @overload # 0d complex64 -> 0d float32 @@ -878,13 +861,13 @@ def angle(z: np.complex64, deg: bool = False) -> np.float32: ... @overload # 0d clongdouble -> 0d longdouble def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... @overload # T: nd floating -> T -def angle(z: _ArrayFloatingT, deg: bool = False) -> _ArrayFloatingT: ... +def angle[ArrayFloatingT: NDArray[np.floating]](z: ArrayFloatingT, deg: bool = False) -> ArrayFloatingT: ... @overload # nd T: complex128 | ~integer -> nd float64 -def angle(z: _Array[_ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[_ShapeT, np.float64]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[ShapeT, np.float64]: ... @overload # nd T: complex64 -> nd float32 -def angle(z: _Array[_ShapeT, np.complex64], deg: bool = False) -> _Array[_ShapeT, np.float32]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex64], deg: bool = False) -> _Array[ShapeT, np.float32]: ... @overload # nd T: clongdouble -> nd longdouble -def angle(z: _Array[_ShapeT, np.clongdouble], deg: bool = False) -> _Array[_ShapeT, np.longdouble]: ... 
+def angle[ShapeT: _Shape](z: _Array[ShapeT, np.clongdouble], deg: bool = False) -> _Array[ShapeT, np.longdouble]: ... @overload # 1d complex -> 1d float64 def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... @overload # 2d complex -> 2d float64 @@ -896,21 +879,21 @@ def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | # @overload # known array-type -def unwrap( - p: _ArrayFloatObjT, +def unwrap[ArrayT: NDArray[np.floating | np.object_]]( + p: ArrayT, discont: float | None = None, axis: int = -1, *, period: float = ..., # = Ī„ -) -> _ArrayFloatObjT: ... +) -> ArrayT: ... @overload # known shape, float64 -def unwrap( - p: _Array[_ShapeT, _float64_co], +def unwrap[ShapeT: _Shape]( + p: _Array[ShapeT, _float64_co], discont: float | None = None, axis: int = -1, *, period: float = ..., # = Ī„ -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # 1d float64-like def unwrap( p: _Seq1D[float | _float64_co], @@ -954,28 +937,28 @@ def unwrap( # @overload -def sort_complex(a: _ArrayComplexT) -> _ArrayComplexT: ... +def sort_complex[ArrayT: NDArray[np.complexfloating]](a: ArrayT) -> ArrayT: ... @overload # complex64, shape known -def sort_complex(a: _Array[_ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[_ShapeT, np.complex64]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[ShapeT, np.complex64]: ... @overload # complex64, shape unknown def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... @overload # complex128, shape known -def sort_complex(a: _Array[_ShapeT, _SortsToComplex128]) -> _Array[_ShapeT, np.complex128]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, _SortsToComplex128]) -> _Array[ShapeT, np.complex128]: ... @overload # complex128, shape unknown def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... 
@overload # clongdouble, shape known -def sort_complex(a: _Array[_ShapeT, np.longdouble]) -> _Array[_ShapeT, np.clongdouble]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.longdouble]) -> _Array[ShapeT, np.clongdouble]: ... @overload # clongdouble, shape unknown def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... # -def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> _T: ... +def trim_zeros[T](filt: _TrimZerosSequence[T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> T: ... # NOTE: keep in sync with `corrcoef` @overload # ?d, known inexact scalar-type >=64 precision, y=. -def cov( - m: _ArrayLike[_AnyDoubleT], - y: _ArrayLike[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, @@ -983,10 +966,10 @@ def cov( aweights: _ArrayLikeFloat_co | None = None, *, dtype: None = None, -) -> _Array2D[_AnyDoubleT]: ... +) -> _Array2D[ScalarT]: ... @overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d -def cov( - m: _ArrayNoD[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayNoD[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -994,11 +977,11 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> NDArray[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... 
@overload # 1d, known inexact scalar-type >=64 precision, y=None -def cov( - m: _Array1D[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _Array1D[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -1006,11 +989,11 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array0D[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array0D[ScalarT]: ... @overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d -def cov( - m: _ArrayLike[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -1018,8 +1001,8 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> NDArray[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... @overload # nd, casts to float64, y= def cov( m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], @@ -1105,7 +1088,7 @@ def cov( dtype: _DTypeLike[np.complex128] | None = None, ) -> NDArray[np.complex128]: ... @overload # 1d complex-like, y=None, dtype= -def cov( +def cov[ScalarT: np.generic]( m: _Seq1D[_ComplexLike_co], y: None = None, rowvar: bool = True, @@ -1114,10 +1097,10 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array0D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array0D[ScalarT]: ... @overload # nd complex-like, y=, dtype= -def cov( +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, rowvar: bool = True, @@ -1126,10 +1109,10 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT]: ... 
+ dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... @overload # nd complex-like, y=None, dtype= -> 0d or 2d -def cov( +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, y: None = None, rowvar: bool = True, @@ -1138,8 +1121,8 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> NDArray[ScalarT]: ... @overload # nd complex-like, y=, dtype=? def cov( m: _ArrayLikeComplex_co, @@ -1182,37 +1165,37 @@ def cov( # This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. # NOTE: keep in sync with `cov` @overload # ?d, known inexact scalar-type >=64 precision, y=. -def corrcoef( - x: _ArrayLike[_AnyDoubleT], - y: _ArrayLike[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT]: ... @overload # ?d, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _ArrayNoD[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayNoD[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... @overload # 1d, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _Array1D[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _Array1D[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> ScalarT: ... 
@overload # nd, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _ArrayLike[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... @overload # nd, casts to float64, y= def corrcoef( x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], @@ -1270,29 +1253,29 @@ def corrcoef( dtype: _DTypeLike[np.complex128] | None = None, ) -> _Array2D[np.complex128] | np.complex128: ... @overload # 1d complex-like, y=None, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _Seq1D[_ComplexLike_co], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _ScalarT: ... + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... @overload # nd complex-like, y=, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... @overload # nd complex-like, y=None, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _ArrayLikeComplex_co, y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT] | _ScalarT: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT] | ScalarT: ... @overload # nd complex-like, y=, dtype=? def corrcoef( x: _ArrayLikeComplex_co, @@ -1327,7 +1310,7 @@ def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... # @overload -def i0(x: _Array[_ShapeT, np.floating | np.integer]) -> _Array[_ShapeT, np.float64]: ... +def i0[ShapeT: _Shape](x: _Array[ShapeT, np.floating | np.integer]) -> _Array[ShapeT, np.float64]: ... @overload def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... 
@overload @@ -1341,15 +1324,15 @@ def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... # @overload -def sinc(x: _InexactT) -> _InexactT: ... +def sinc[ScalarT: np.inexact](x: ScalarT) -> ScalarT: ... @overload def sinc(x: float | _float64_co) -> np.float64: ... @overload def sinc(x: complex) -> np.complex128 | Any: ... @overload -def sinc(x: _ArrayInexactT) -> _ArrayInexactT: ... +def sinc[ArrayT: NDArray[np.inexact]](x: ArrayT) -> ArrayT: ... @overload -def sinc(x: _Array[_ShapeT, _integer_co]) -> _Array[_ShapeT, np.float64]: ... +def sinc[ShapeT: _Shape](x: _Array[ShapeT, _integer_co]) -> _Array[ShapeT, np.float64]: ... @overload def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... @overload @@ -1370,13 +1353,13 @@ def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... # NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays # it has no effect, and would complicate the overloads significantly. @overload # known scalar-type, keepdims=False (default) -def median( - a: _ArrayLike[_InexactTimeT], +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # float array-like, keepdims=False (default) def median( a: _ArrayLikeInt_co | _SeqND[float] | float, @@ -1402,31 +1385,31 @@ def median( keepdims: L[False] = False, ) -> np.complex128 | Any: ... @overload # known array-type, keepdims=True -def median( - a: _ArrayNumericT, +def median[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, *, keepdims: L[True], -) -> _ArrayNumericT: ... +) -> ArrayT: ... 
@overload # known scalar-type, keepdims=True -def median( - a: _ArrayLike[_ScalarNumericT], +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, *, keepdims: L[True], -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis= -def median( - a: _ArrayLike[_ScalarNumericT], +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], axis: _ShapeLike, out: None = None, overwrite_input: bool = False, keepdims: bool = False, -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # float array-like, keepdims=True def median( a: _SeqND[float], @@ -1462,22 +1445,22 @@ def median( keepdims: bool = False, ) -> NDArray[np.complex128]: ... @overload # out= (keyword) -def median( +def median[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def median( +def median[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def median( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], @@ -1489,8 +1472,8 @@ def median( # NOTE: keep in sync with `quantile` @overload # inexact, scalar, axis=None -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: None = None, out: None = None, @@ -1499,10 +1482,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _InexactDateTimeT: ... +) -> ScalarT: ... 
@overload # inexact, scalar, axis= -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike, out: None = None, @@ -1511,10 +1494,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, scalar, keepdims=True -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike | None = None, out: None = None, @@ -1523,11 +1506,11 @@ def percentile( *, keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, array, axis=None -def percentile( - a: _ArrayLike[_InexactDateTimeT], - q: _Array[_ShapeT, _floating_co], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1535,10 +1518,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, _InexactDateTimeT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload # inexact, array-like -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, @@ -1547,7 +1530,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... 
@overload # float, scalar, axis=None def percentile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1585,9 +1568,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.float64]: ... @overload # float, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _SeqND[float] | _ArrayLikeInt_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1595,7 +1578,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # float, array-like def percentile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1645,9 +1628,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.complex128]: ... @overload # complex, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _ListSeqND[complex], - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1655,7 +1638,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # complex, array-like def percentile( a: _ListSeqND[complex], @@ -1705,9 +1688,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # object_, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _ArrayLikeObject_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1715,7 +1698,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.object_]: ... +) -> _Array[ShapeT, np.object_]: ... 
@overload # object_, array-like def percentile( a: _ArrayLikeObject_co, @@ -1729,29 +1712,29 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # out= (keyword) -def percentile( +def percentile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def percentile( +def percentile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def percentile( a: _ArrayLikeNumber_co | _ArrayLikeObject_co, @@ -1767,8 +1750,8 @@ def percentile( # NOTE: keep in sync with `percentile` @overload # inexact, scalar, axis=None -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: None = None, out: None = None, @@ -1777,10 +1760,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _InexactDateTimeT: ... +) -> ScalarT: ... @overload # inexact, scalar, axis= -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike, out: None = None, @@ -1789,10 +1772,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... 
@overload # inexact, scalar, keepdims=True -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike | None = None, out: None = None, @@ -1801,11 +1784,11 @@ def quantile( *, keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, array, axis=None -def quantile( - a: _ArrayLike[_InexactDateTimeT], - q: _Array[_ShapeT, _floating_co], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1813,10 +1796,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, _InexactDateTimeT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload # inexact, array-like -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, @@ -1825,7 +1808,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # float, scalar, axis=None def quantile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1863,9 +1846,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.float64]: ... 
@overload # float, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _SeqND[float] | _ArrayLikeInt_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1873,7 +1856,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # float, array-like def quantile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1923,9 +1906,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.complex128]: ... @overload # complex, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _ListSeqND[complex], - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1933,7 +1916,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # complex, array-like def quantile( a: _ListSeqND[complex], @@ -1983,9 +1966,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # object_, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _ArrayLikeObject_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1993,7 +1976,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.object_]: ... +) -> _Array[ShapeT, np.object_]: ... @overload # object_, array-like def quantile( a: _ArrayLikeObject_co, @@ -2007,29 +1990,29 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... 
@overload # out= (keyword) -def quantile( +def quantile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def quantile( +def quantile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def quantile( a: _ArrayLikeNumber_co | _ArrayLikeObject_co, @@ -2045,12 +2028,12 @@ def quantile( # @overload # ?d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _ArrayNoD[_InexactTimeT], - x: _ArrayLike[_InexactTimeT] | _ArrayLikeFloat_co | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayNoD[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +) -> NDArray[ScalarT] | ScalarT: ... @overload # ?d, casts to float64 def trapezoid( y: _ArrayNoD[_integer_co], @@ -2059,12 +2042,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> NDArray[np.float64] | np.float64: ... @overload # strict 1d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _Array1D[_InexactTimeT], - x: _Array1D[_InexactTimeT] | _Seq1D[float] | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array1D[ScalarT], + x: _Array1D[ScalarT] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _InexactTimeT: ... +) -> ScalarT: ... 
@overload # strict 1d, casts to float64 def trapezoid( y: _Array1D[_float64_co] | _Seq1D[float], @@ -2087,12 +2070,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> np.complex128: ... @overload # strict 2d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _Array2D[_InexactTimeT], - x: _ArrayMax2D[_InexactTimeT] | _Seq2D[float] | _Seq1D[float] | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array2D[ScalarT], + x: _ArrayMax2D[ScalarT] | _Seq2D[float] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # strict 2d, casts to float64 def trapezoid( y: _Array2D[_float64_co] | _Seq2D[float], @@ -2115,12 +2098,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> np.complex128: ... @overload -def trapezoid( - y: _ArrayLike[_InexactTimeT], - x: _ArrayLike[_InexactTimeT] | _ArrayLikeInt_co | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayLike[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeInt_co | None = None, dx: complex = 1.0, axis: SupportsIndex = -1, -) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +) -> NDArray[ScalarT] | ScalarT: ... @overload def trapezoid( y: _ArrayLike[_float64_co], @@ -2150,12 +2133,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> NDArray[np.object_] | Any: ... @overload -def trapezoid( - y: _Seq1D[_SupportsRMulFloat[_T]], - x: _Seq1D[_SupportsRMulFloat[_T] | _T] | None = None, +def trapezoid[T]( + y: _Seq1D[_SupportsRMulFloat[T]], + x: _Seq1D[_SupportsRMulFloat[T] | T] | None = None, dx: complex = 1.0, axis: SupportsIndex = -1, -) -> _T: ... +) -> T: ... @overload def trapezoid( y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], @@ -2168,14 +2151,14 @@ def trapezoid( @overload # 0d def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... 
@overload # 1d, known scalar-type -def meshgrid( - x1: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh1[_ScalarT]: ... +) -> _Mesh1[ScalarT]: ... @overload # 1d, unknown scalar-type def meshgrid( x1: ArrayLike, @@ -2186,35 +2169,35 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh1[Any]: ... @overload # 2d, known scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: _ArrayLike[_ScalarT1], +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[_ScalarT, _ScalarT1]: ... +) -> _Mesh2[ScalarT1, ScalarT2]: ... @overload # 2d, known/unknown scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], x2: ArrayLike, /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[_ScalarT, Any]: ... +) -> _Mesh2[ScalarT, Any]: ... @overload # 2d, unknown/known scalar-types -def meshgrid( +def meshgrid[ScalarT: np.generic]( x1: ArrayLike, - x2: _ArrayLike[_ScalarT], + x2: _ArrayLike[ScalarT], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[Any, _ScalarT]: ... +) -> _Mesh2[Any, ScalarT]: ... @overload # 2d, unknown scalar-types def meshgrid( x1: ArrayLike, @@ -2226,16 +2209,16 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh2[Any, Any]: ... @overload # 3d, known scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: _ArrayLike[_ScalarT1], - x3: _ArrayLike[_ScalarT2], +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic, ScalarT3: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], + x3: _ArrayLike[ScalarT3], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh3[_ScalarT, _ScalarT1, _ScalarT2]: ... 
+) -> _Mesh3[ScalarT1, ScalarT2, ScalarT3]: ... @overload # 3d, unknown scalar-types def meshgrid( x1: ArrayLike, @@ -2248,12 +2231,12 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh3[Any, Any, Any]: ... @overload # ?d, known scalar-types -def meshgrid( - *xi: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + *xi: _ArrayLike[ScalarT], copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> tuple[NDArray[_ScalarT], ...]: ... +) -> tuple[NDArray[ScalarT], ...]: ... @overload # ?d, unknown scalar-types def meshgrid( *xi: ArrayLike, @@ -2267,11 +2250,11 @@ def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], # keep in sync with `insert` @overload # known scalar-type, axis=None (default) -def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[_ScalarT]: ... +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[ScalarT]: ... @overload # known array-type, axis specified -def delete(arr: _ArrayT, obj: _IndexLike, axis: SupportsIndex) -> _ArrayT: ... +def delete[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, axis: SupportsIndex) -> ArrayT: ... @overload # known scalar-type, axis specified -def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis=None (default) def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... @overload # unknown scalar-type, axis specified @@ -2279,39 +2262,39 @@ def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any] # keep in sync with `delete` @overload # known scalar-type, axis=None (default) -def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[_ScalarT]: ... 
+def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[ScalarT]: ... @overload # known array-type, axis specified -def insert(arr: _ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> _ArrayT: ... +def insert[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> ArrayT: ... @overload # known scalar-type, axis specified -def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis=None (default) def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... @overload # unknown scalar-type, axis specified def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ... -# +# keep in sync with `ma.core.append` @overload # known array type, axis specified -def append(arr: _ArrayT, values: _ArrayT, axis: SupportsIndex) -> _ArrayT: ... +def append[ArrayT: np.ndarray](arr: ArrayT, values: ArrayT, axis: SupportsIndex) -> ArrayT: ... @overload # 1d, known scalar type, axis specified -def append(arr: _Seq1D[_ScalarT], values: _Seq1D[_ScalarT], axis: SupportsIndex) -> _Array1D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq1D[ScalarT], values: _Seq1D[ScalarT], axis: SupportsIndex) -> _Array1D[ScalarT]: ... @overload # 2d, known scalar type, axis specified -def append(arr: _Seq2D[_ScalarT], values: _Seq2D[_ScalarT], axis: SupportsIndex) -> _Array2D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq2D[ScalarT], values: _Seq2D[ScalarT], axis: SupportsIndex) -> _Array2D[ScalarT]: ... 
@overload # 3d, known scalar type, axis specified -def append(arr: _Seq3D[_ScalarT], values: _Seq3D[_ScalarT], axis: SupportsIndex) -> _Array3D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq3D[ScalarT], values: _Seq3D[ScalarT], axis: SupportsIndex) -> _Array3D[ScalarT]: ... @overload # ?d, known scalar type, axis specified -def append(arr: _SeqND[_ScalarT], values: _SeqND[_ScalarT], axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _SeqND[ScalarT], values: _SeqND[ScalarT], axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # ?d, unknown scalar type, axis specified def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... @overload # known scalar type, axis=None -def append(arr: _ArrayLike[_ScalarT], values: _ArrayLike[_ScalarT], axis: None = None) -> _Array1D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _ArrayLike[ScalarT], values: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT]: ... @overload # unknown scalar type, axis=None def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... # @overload -def digitize( - x: _Array[_ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False -) -> _Array[_ShapeT, np.int_]: ... +def digitize[ShapeT: _Shape]( + x: _Array[ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[ShapeT, np.int_]: ... @overload def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... 
@overload diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 72a31dcedc1f..9f9bdb25aa6c 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,40 +1,273 @@ +from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, Literal as L, SupportsIndex, TypeAlias +from typing import Any, Literal as L, SupportsIndex, overload -from numpy._typing import ArrayLike, NDArray +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _NestedSequence, +) __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] -_BinKind: TypeAlias = L[ - "stone", - "auto", - "doane", - "fd", - "rice", - "scott", - "sqrt", - "sturges", -] +### +type _BinKind = L["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"] + +type _Range = tuple[float, float] +type _NestedList[T] = list[T] | _NestedSequence[list[T]] + +type _WeightsLike = _ArrayLikeComplex_co | _ArrayLikeObject_co +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _HistogramResult[HistT: np.generic, EdgeT: np.generic] = tuple[_Array1D[HistT], _Array1D[EdgeT]] + +### + +# NOTE: The return type can also be complex or `object_`, not only floating like the docstring suggests. +@overload # dtype +float64 +def histogram_bin_edges( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[np.float64]: ... +@overload # dtype ~complex def histogram_bin_edges( - a: ArrayLike, + a: _NestedList[complex], bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, - weights: ArrayLike | None = None, -) -> NDArray[Any]: ... 
+ range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[np.complex128]: ... +@overload # dtype known +def histogram_bin_edges[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[ScalarT]: ... +@overload # dtype unknown +def histogram_bin_edges( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[Incomplete]: ... +# There are 4 groups of 2 + 3 overloads (2 for density=True, 3 for density=False) = 20 in total +@overload # a: +float64, density: True (keyword), weights: +float | None (default) def histogram( - a: ArrayLike, + a: _ArrayLikeInt_co | _NestedSequence[float], bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, - density: bool | None = None, - weights: ArrayLike | None = None, -) -> tuple[NDArray[Any], NDArray[Any]]: ... + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, np.float64]: ... +@overload # a: +float64, density: True (keyword), weights: +complex +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, np.float64]: ... +@overload # a: +float64, density: False (default), weights: ~int | None (default) +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, np.float64]: ... 
+@overload # a: +float64, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, np.float64]: ... +@overload # a: +float64, density: False (default), weights: unknown (keyword) +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, np.float64]: ... +@overload # a: ~complex, density: True (keyword), weights: +float | None (default) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, np.complex128]: ... +@overload # a: ~complex, density: True (keyword), weights: +complex +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, np.complex128]: ... +@overload # a: ~complex, density: False (default), weights: ~int | None (default) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, np.complex128]: ... 
+@overload # a: ~complex, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, np.complex128]: ... +@overload # a: ~complex, density: False (default), weights: unknown (keyword) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, np.complex128]: ... +@overload # a: known, density: True (keyword), weights: +float | None (default) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, ScalarT]: ... +@overload # a: known, density: True (keyword), weights: +complex +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, ScalarT]: ... +@overload # a: known, density: False (default), weights: ~int | None (default) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, ScalarT]: ... 
+@overload # a: known, density: False (default), weights: known (keyword) +def histogram[ScalarT: np.inexact | np.object_, WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, ScalarT]: ... +@overload # a: known, density: False (default), weights: unknown (keyword) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, ScalarT]: ... +@overload # a: unknown, density: True (keyword), weights: +float | None (default) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, Incomplete]: ... +@overload # a: unknown, density: True (keyword), weights: +complex +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, Incomplete]: ... +@overload # a: unknown, density: False (default), weights: ~int | None (default) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, Incomplete]: ...
+@overload # a: unknown, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, Incomplete]: ... +@overload # a: unknown, density: False (default), weights: unknown (keyword) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, Incomplete]: ... +# unlike `histogram`, `weights` must be safe-castable to f64 +@overload # dtype +float64 +def histogramdd( + sample: _ArrayLikeInt_co | _NestedSequence[float] | _ArrayLikeObject_co, + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[np.float64], ...]]: ... +@overload # dtype ~complex +def histogramdd( + sample: _NestedList[complex], + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[np.complex128], ...]]: ... +@overload # dtype known +def histogramdd[ScalarT: np.inexact]( + sample: _ArrayLike[ScalarT], + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[ScalarT], ...]]: ... 
+@overload # dtype unknown def histogramdd( - sample: ArrayLike, + sample: _ArrayLikeComplex_co, bins: SupportsIndex | ArrayLike = 10, - range: Sequence[tuple[float, float]] | None = None, + range: Sequence[_Range] | None = None, density: bool | None = None, - weights: ArrayLike | None = None, -) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[Any], ...]]: ... diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 40fef85b1853..5ee60d0fceaf 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -977,7 +977,7 @@ def diag_indices(n, ndim=2): And use it to set the diagonal of an array of zeros to 1: - >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a = np.zeros((2, 2, 2), dtype=np.int_) >>> a[d3] = 1 >>> a array([[[1, 0], diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index ff316f566993..ae24f84189f0 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -6,6 +6,7 @@ from typing import ( Final, Generic, Literal as L, + Never, Self, SupportsIndex, final, @@ -23,13 +24,13 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _DTypeLike, - _FiniteNestedSequence, - _HasDType, + _IntLike_co, _NestedSequence, + _ScalarLike_co, _SupportsArray, ) -__all__ = [ # noqa: RUF022 +__all__ = [ "ravel_multi_index", "unravel_index", "mgrid", @@ -48,24 +49,31 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) _BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) - _AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) _MatrixT_co = 
TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) _NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] + +type _Int1D = _Array1D[np.intp] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] + +type _JustAnyShape = tuple[Never, Never, Never, Never, Never] # workaround for microsoft/pyright#10232 + ### class ndenumerate(Generic[_ScalarT_co]): @overload - def __init__(self: ndenumerate[_ScalarT], arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> None: ... + def __init__[ScalarT: np.generic]( + self: ndenumerate[ScalarT], + arr: _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] | _SupportsArray[np.dtype[ScalarT]], + ) -> None: ... @overload def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... @overload @@ -156,26 +164,26 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] # Keep in sync with _core.multiarray.concatenate @staticmethod @overload - def concatenate( - arrays: _ArrayLike[_ScalarT], + def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, casting: _CastingKind | None = "same_kind", - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @staticmethod @overload - def concatenate( + def concatenate[ScalarT: np.generic]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind | None = "same_kind", - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... 
@staticmethod @overload def concatenate( @@ -189,26 +197,26 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] ) -> NDArray[Incomplete]: ... @staticmethod @overload - def concatenate( + def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", - ) -> _ArrayT: ... + ) -> OutT: ... @staticmethod @overload - def concatenate( + def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", - ) -> _ArrayT: ... + ) -> OutT: ... @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): @@ -228,33 +236,106 @@ class IndexExpression(Generic[_BoolT_co]): maketuple: _BoolT_co def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupleT) -> _TupleT: ... + def __getitem__[TupleT: tuple[Any, ...]](self, item: TupleT) -> TupleT: ... @overload - def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + def __getitem__[T](self: IndexExpression[L[True]], item: T) -> tuple[T]: ... @overload - def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... + +# only the `int` sequences have special-cased shape-type overloads, because this is the +# most common use case and the others would require too many overloads to be worth it. +@overload # 0 +def ix_() -> tuple[()]: ... +@overload # 1 +int +def ix_(arg0: Sequence[int], /) -> tuple[_Array1D[np.int_]]: ... +@overload # 1 ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + /, +) -> tuple[_Array1D[ScalarT]]: ... 
+@overload # 2 +int +def ix_( + arg0: Sequence[int], + arg1: Sequence[int], + /, +) -> tuple[_Array2D[np.int_], _Array2D[np.int_]]: ... +@overload # 2 ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + arg1: _ToArray1D[ScalarT], + /, +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... +@overload # 3 +int +def ix_( + arg0: Sequence[int], + arg1: Sequence[int], + arg2: Sequence[int], + /, +) -> tuple[_Array3D[np.int_], _Array3D[np.int_], _Array3D[np.int_]]: ... +@overload # 3 ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + arg1: _ToArray1D[ScalarT], + arg2: _ToArray1D[ScalarT], + /, +) -> tuple[_Array3D[ScalarT], _Array3D[ScalarT], _Array3D[ScalarT]]: ... +@overload # N +int +def ix_( + arg0: Sequence[int], + arg1: Sequence[int], + arg2: Sequence[int], + /, + *args: Sequence[int], +) -> tuple[NDArray[np.int_], ...]: ... +@overload # N ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + arg1: _ToArray1D[ScalarT], + arg2: _ToArray1D[ScalarT], + /, + *args: _ToArray1D[ScalarT], +) -> tuple[NDArray[ScalarT], ...]: ... +@overload # N float +def ix_(arg0: list[float], /, *args: Sequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload # N complex +def ix_(arg0: list[complex], /, *args: Sequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... +@overload # N bytes +def ix_(arg0: Sequence[bytes], /, *args: Sequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload # N str +def ix_(arg0: Sequence[str], /, *args: Sequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload # fallback +def ix_( + arg0: Sequence[_ScalarLike_co] | _Array1D[Any], + /, + *args: Sequence[_ScalarLike_co] | _Array1D[Any], +) -> tuple[NDArray[Any], ...]: ... +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... + +# @overload -def ix_(*args: _FiniteNestedSequence[_HasDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... 
-@overload -def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... -@overload -def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +def diag_indices(n: _IntLike_co, ndim: L[0]) -> tuple[()]: ... @overload -def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +def diag_indices(n: _IntLike_co, ndim: L[1]) -> tuple[_Int1D]: ... @overload -def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices(n: _IntLike_co, ndim: L[2] = 2) -> tuple[_Int1D, _Int1D]: ... @overload -def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +def diag_indices(n: _IntLike_co, ndim: L[3]) -> tuple[_Int1D, _Int1D, _Int1D]: ... @overload -def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... - -# -def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... +def diag_indices(n: _IntLike_co, ndim: int) -> tuple[_Int1D, ...]: ... # -def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ... -def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... +@overload # ?d (workaround) +def diag_indices_from(arr: np.ndarray[_JustAnyShape]) -> tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]: ... +@overload # 2d +def diag_indices_from(arr: np.ndarray[tuple[int, int]]) -> tuple[_Int1D, _Int1D]: ... +@overload # 3d +def diag_indices_from(arr: np.ndarray[tuple[int, int, int]]) -> tuple[_Int1D, _Int1D, _Int1D]: ... +@overload # 4d +def diag_indices_from(arr: np.ndarray[tuple[int, int, int, int]]) -> tuple[_Int1D, _Int1D, _Int1D, _Int1D]: ... +@overload # >=2d (fallback) +def diag_indices_from(arr: np.ndarray[tuple[int, int, *tuple[int, ...]]]) -> tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]: ... # mgrid: Final[MGridClass] = ... 
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 3586b41de86c..ad1ee8785328 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -373,7 +373,7 @@ def validate(self, names, defaultfmt="f%i", nbfields=None): item += '_' cnt = seen.get(item, 0) if cnt > 0: - validatednames.append(item + '_%d' % cnt) + validatednames.append(f"{item}_{cnt}") else: validatednames.append(item) seen[item] = cnt + 1 @@ -497,7 +497,7 @@ class StringConverter: upgrade or not. Default is False. """ - _mapper = [(nx.bool, str2bool, False), + _mapper = [(nx.bool, str2bool, False), # noqa: RUF012 (nx.int_, int, -1),] # On 32-bit systems, we need to make sure that we explicitly include diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 3bf41fc0cdde..7baca9c78045 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -5,7 +5,6 @@ from typing import ( Final, Literal, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -15,8 +14,6 @@ import numpy as np import numpy.typing as npt from numpy._typing._dtype_like import _DTypeLikeNested -_T = TypeVar("_T") - @type_check_only class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None @@ -46,7 +43,7 @@ class LineSplitter: encoding: str | None = None, ) -> None: ... def __call__(self, /, line: str | bytes) -> list[str]: ... - def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + def autostrip[T](self, /, method: Callable[[T], Iterable[str]]) -> Callable[[T], list[str]]: ... class NameValidator: defaultexcludelist: ClassVar[Sequence[str]] = ... 
diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index f030d74c5c11..86e3e9933784 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1377,7 +1377,8 @@ def nanpercentile( if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.true_divide(q, 100, out=...) if not fnb._quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -1393,7 +1394,7 @@ def nanpercentile( raise ValueError("Weights must be non-negative.") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, @@ -1552,11 +1553,8 @@ def nanquantile( if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). 
- if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.asanyarray(q) if not fnb._quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -1573,7 +1571,7 @@ def nanquantile( raise ValueError("Weights must be non-negative.") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _nanquantile_unchecked( @@ -1585,6 +1583,7 @@ def _nanquantile_unchecked( method="linear", keepdims=np._NoValue, weights=None, + weak_q=False, ): """Assumes that q is in [0, 1], and is an ndarray""" # apply_along_axis in _nanpercentile doesn't handle empty arrays well, @@ -1599,17 +1598,19 @@ def _nanquantile_unchecked( axis=axis, out=out, overwrite_input=overwrite_input, - method=method) + method=method, + weak_q=weak_q) def _nanquantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, axis: int | None = None, out=None, overwrite_input: bool = False, method="linear", + weak_q=False, ): """ Private function that doesn't support extended axis or keepdims. @@ -1619,11 +1620,12 @@ def _nanquantile_ureduce_func( if axis is None or a.ndim == 1: part = a.ravel() wgt = None if weights is None else weights.ravel() - result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) + result = _nanquantile_1d(part, q, overwrite_input, method, + weights=wgt, weak_q=weak_q) # Note that this code could try to fill in `out` right away elif weights is None: result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) + overwrite_input, method, weights, weak_q) # apply_along_axis fills in collapsed axis with results. # Move those axes to the beginning to match percentile's # convention. 
@@ -1647,6 +1649,7 @@ def _nanquantile_ureduce_func( result[(...,) + ii] = _nanquantile_1d( a[ii], q, weights=weights[ii], overwrite_input=overwrite_input, method=method, + weak_q=weak_q, ) # This path dealt with `out` already... return result @@ -1658,6 +1661,7 @@ def _nanquantile_ureduce_func( def _nanquantile_1d( arr1d, q, overwrite_input=False, method="linear", weights=None, + weak_q=False, ): """ Private function for rank 1 arrays. Compute quantile ignoring NaNs. @@ -1676,6 +1680,7 @@ def _nanquantile_1d( overwrite_input=overwrite_input, method=method, weights=weights, + weak_q=weak_q, ) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 72e746f19eba..0ea5da929895 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -765,7 +765,7 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): namedict = kwds for i, val in enumerate(args): - key = 'arr_%d' % i + key = f'arr_{i}' if key in namedict.keys(): raise ValueError( f"Cannot use un-named variables and keyword {key}") @@ -841,7 +841,7 @@ def _preprocess_comments(iterable, comments, encoding): Generator that consumes a line iterated iterable and strips out the multiple (or multi-character) comments from lines. This is a pre-processing step to achieve feature parity with loadtxt - (we assume that this feature is a nieche feature). + (we assume that this feature is a niche feature). 
""" for line in iterable: if isinstance(line, bytes): @@ -1343,7 +1343,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, single escaped character: >>> s = StringIO('"Hello, my name is ""Monty""!"') - >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"') + >>> np.loadtxt(s, dtype=np.str_, delimiter=",", quotechar='"') array('Hello, my name is "Monty"!', dtype=' 2: raise ValueError( - "Expected 1D or 2D array, got %dD array instead" % X.ndim) + f"Expected 1D or 2D array, got {X.ndim}D array instead") elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: @@ -1617,9 +1614,10 @@ def first_write(self, v): try: v = format % tuple(row) + newline except TypeError as e: - raise TypeError("Mismatch between array dtype ('%s') and " - "format specifier ('%s')" - % (str(X.dtype), format)) from e + raise TypeError( + f"Mismatch between array dtype ('{str(X.dtype)}') and " + f"format specifier ('{format}')" + ) from e fh.write(v) if len(footer) > 0: @@ -1758,8 +1756,11 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, in a list or produced by a generator are treated as lines. dtype : dtype, optional Data type of the resulting array. - If None, the dtypes will be determined by the contents of each - column, individually. + If a structured dtype, the output array will be 1D and structured where + each field corresponds to one column. + If None, the dtype of each column will be inferred automatically, and + the output array will be structured only if either the dtypes are not + all the same or if `names` is not None. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded. @@ -1788,13 +1789,15 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. 
names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be preceded - by a comment delimiter. Any content before the comment delimiter is - discarded. If `names` is a sequence or a single-string of - comma-separated names, the names will be used to define the field - names in a structured dtype. If `names` is None, the names of the - dtype fields will be used, if any. + If `names` is True, the output will be a structured array whose field + names are read from the first line after the first `skip_header` lines. + This line can optionally be preceded by a comment delimiter. Any content + before the comment delimiter is discarded. + If `names` is a sequence or a single string of comma-separated names, + the output is a structured array whose field names are taken from + `names`. + If `names` is None, the output is structured only if `dtype` is + structured, in which case the field names are taken from `dtype`. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended with an @@ -1963,7 +1966,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " - "(got '%s' instead)" % type(user_converters)) + f"(got '{type(user_converters)}' instead)" + ) if encoding == 'bytes': encoding = None @@ -2485,99 +2489,3 @@ def encode_unicode_cols(row_tup): _genfromtxt_with_like = array_function_dispatch()(genfromtxt) - - -def recfromtxt(fname, **kwargs): - """ - Load ASCII data from a file and return it in a record array. - - If ``usemask=False`` a standard `recarray` is returned, - if ``usemask=True`` a MaskedRecords array is returned. - - .. deprecated:: 2.0 - Use `numpy.genfromtxt` instead. 
- - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`recfromtxt` is deprecated, " - "use `numpy.genfromtxt` instead." - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - kwargs.setdefault("dtype", None) - usemask = kwargs.get('usemask', False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output - - -def recfromcsv(fname, **kwargs): - """ - Load ASCII data stored in a comma-separated file. - - The returned array is a record array (if ``usemask=False``, see - `recarray`) or a masked record array (if ``usemask=True``, - see `ma.mrecords.MaskedRecords`). - - .. deprecated:: 2.0 - Use `numpy.genfromtxt` with comma as `delimiter` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`recfromcsv` is deprecated, " - "use `numpy.genfromtxt` with comma as `delimiter` instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - # Set default kwargs for genfromtxt as relevant to csv import. 
- kwargs.setdefault("case_sensitive", "lower") - kwargs.setdefault("names", True) - kwargs.setdefault("delimiter", ",") - kwargs.setdefault("dtype", None) - output = genfromtxt(fname, **kwargs) - - usemask = kwargs.get("usemask", False) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 349edd06f57b..07da3cd357c1 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -13,20 +13,18 @@ from typing import ( IO, Any, ClassVar, - Generic, Literal as L, Protocol, Self, - TypeAlias, overload, + override, type_check_only, ) -from typing_extensions import TypeVar, override +from typing_extensions import TypeVar import numpy as np from numpy._core.multiarray import packbits, unpackbits from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc -from numpy.ma.mrecords import MaskedRecords from ._datasource import DataSource as DataSource @@ -43,23 +41,22 @@ __all__ = [ "unpackbits", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) -_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] -_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] -_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] -_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] +type _FName = StrPath | Iterable[str] | Iterable[bytes] +type _FNameRead = StrPath | SupportsRead[str] | SupportsRead[bytes] +type _FNameWriteBytes = StrPath | SupportsWrite[bytes] +type _FNameWrite = _FNameWriteBytes | SupportsWrite[str] + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] @type_check_only -class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): +class 
_SupportsReadSeek[T](SupportsRead[T], Protocol): def seek(self, offset: int, whence: int, /) -> object: ... -class BagObj(Generic[_T_co]): - def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str, /) -> _T_co: ... +class BagObj[T]: + def __init__(self, /, obj: SupportsKeysAndGetItem[str, T]) -> None: ... + def __getattribute__(self, key: str, /) -> T: ... def __dir__(self) -> list[str]: ... class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): @@ -96,9 +93,9 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): # @override @overload - def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... + def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... # pyrefly: ignore[bad-override] @overload - def get(self, key: str, default: NDArray[_ScalarT_co] | _T, /) -> NDArray[_ScalarT_co] | _T: ... # pyright: ignore[reportIncompatibleMethodOverride] + def get[T](self, key: str, default: NDArray[_ScalarT_co] | T, /) -> NDArray[_ScalarT_co] | T: ... # pyright: ignore[reportIncompatibleMethodOverride] # def close(self) -> None: ... @@ -139,9 +136,9 @@ def loadtxt( like: _SupportsArrayFunc | None = None, ) -> NDArray[np.float64]: ... @overload -def loadtxt( +def loadtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -154,7 +151,7 @@ def loadtxt( *, quotechar: str | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def loadtxt( fname: _FName, @@ -186,19 +183,19 @@ def savetxt( ) -> None: ... 
@overload -def fromregex( +def fromregex[ScalarT: np.generic]( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], encoding: str | None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload def fromregex( file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: DTypeLike | None, encoding: str | None = None, -) -> NDArray[Any]: ... +) -> _Array1D[Any]: ... @overload def genfromtxt( @@ -230,9 +227,9 @@ def genfromtxt( like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload -def genfromtxt( +def genfromtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str = "#", delimiter: str | int | Iterable[int] | None = None, skip_header: int = 0, @@ -257,7 +254,7 @@ def genfromtxt( *, ndmin: L[0, 1, 2] = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def genfromtxt( fname: _FName, @@ -287,13 +284,3 @@ def genfromtxt( ndmin: L[0, 1, 2] = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... - -@overload -def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... -@overload -def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... - -@overload -def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... -@overload -def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... 
diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index e9d2d5d23fc6..81f2a0e5d7cd 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -5,7 +5,6 @@ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', 'polyfit'] - import functools import re import warnings @@ -141,8 +140,7 @@ def poly(seq_of_zeros): seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: dt = seq_of_zeros.dtype - # Let object arrays slip through, e.g. for arbitrary precision - if dt != object: + if dt.type is not NX.object_: seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") @@ -252,6 +250,10 @@ def roots(p): A = diag(NX.ones((N - 2,), p.dtype), -1) A[0, :] = -p[1:] / p[0] roots = eigvals(A) + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + roots = _to_real_if_imag_zero(roots, A) else: roots = NX.array([]) @@ -1325,9 +1327,9 @@ def fmt_float(q): elif coefstr == '0': newstr = '' elif coefstr == 'b': - newstr = '%s**%d' % (var, power,) + newstr = f'{var}**{power}' else: - newstr = '%s %s**%d' % (coefstr, var, power) + newstr = f'{coefstr} {var}**{power}' if k > 0: if newstr != '': diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 9c02a7f867c5..82ec616d6458 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,11 +1,13 @@ +from _typeshed import ConvertibleToInt, Incomplete +from collections.abc import Iterator from typing import ( Any, + ClassVar, Literal as L, NoReturn, + Self, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, ) @@ -18,7 +20,6 @@ from numpy import ( int32, int64, object_, - poly1d, signedinteger, unsignedinteger, ) @@ -31,18 +32,15 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, + _FloatLike_co, + 
_NestedSequence, + _ScalarLike_co, ) -_T = TypeVar("_T") +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[float64], NDArray[int32], NDArray[float64], NDArray[float64]] -_2Tup: TypeAlias = tuple[_T, _T] -_5Tup: TypeAlias = tuple[ - _T, - NDArray[float64], - NDArray[int32], - NDArray[float64], - NDArray[float64], -] +### __all__ = [ "poly", @@ -58,6 +56,98 @@ __all__ = [ "polyfit", ] +class poly1d: + __module__: L["numpy"] = "numpy" # pyrefly: ignore[bad-override] + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def variable(self) -> str: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Incomplete]: ... + @property + def r(self) -> NDArray[Incomplete]: ... + + # + @property + def coeffs(self) -> NDArray[Incomplete]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def coef(self) -> NDArray[Incomplete]: ... + @coef.setter + def coef(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def coefficients(self) -> NDArray[Incomplete]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Incomplete], /) -> None: ... + + # + def __init__(self, /, c_or_r: ArrayLike, r: bool = False, variable: str | None = None) -> None: ... + + # + @overload + def __array__(self, /, t: None = None, copy: bool | None = None) -> np.ndarray[tuple[int], np.dtype[Incomplete]]: ... + @overload + def __array__[DTypeT: np.dtype](self, /, t: DTypeT, copy: bool | None = None) -> np.ndarray[tuple[int], DTypeT]: ... + + # + @overload + def __call__(self, /, val: _ScalarLike_co) -> Incomplete: ... + @overload + def __call__(self, /, val: poly1d) -> Self: ... 
+ @overload + def __call__(self, /, val: NDArray[Incomplete] | _NestedSequence[_ScalarLike_co]) -> NDArray[Incomplete]: ... + + # + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[Incomplete]: ... + + # + def __getitem__(self, val: int, /) -> Incomplete: ... + def __setitem__(self, key: int, val: Incomplete, /) -> None: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + + # + def __add__(self, other: ArrayLike, /) -> Self: ... + def __radd__(self, other: ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: ArrayLike, /) -> Self: ... + def __rsub__(self, other: ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: ArrayLike, /) -> Self: ... + def __rmul__(self, other: ArrayLike, /) -> Self: ... + + # + def __pow__(self, val: _FloatLike_co, /) -> Self: ... # Integral floats are accepted + + # + def __truediv__(self, other: ArrayLike, /) -> Self: ... + def __rtruediv__(self, other: ArrayLike, /) -> Self: ... + + # + def deriv(self, /, m: ConvertibleToInt = 1) -> Self: ... + def integ(self, /, m: ConvertibleToInt = 1, k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0) -> poly1d: ... + +# def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ... # Returns either a float or complex array depending on the input values. 
diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index c9e0fd316e04..454d3f5d2c26 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,9 +1,8 @@ import functools -import warnings import numpy as np import numpy._core.numeric as _nx -from numpy._core import atleast_3d, overrides, vstack +from numpy._core import atleast_3d, overrides from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import reshape, transpose from numpy._core.multiarray import normalize_axis_index @@ -15,13 +14,12 @@ zeros, zeros_like, ) -from numpy._core.overrides import set_module from numpy._core.shape_base import _arrays_for_stack_dispatcher from numpy.lib._index_tricks_impl import ndindex from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells __all__ = [ - 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'column_stack', 'dstack', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', 'apply_along_axis', 'kron', 'tile', 'take_along_axis', 'put_along_axis' @@ -602,22 +600,6 @@ def expand_dims(a, axis): return a.reshape(shape) -# NOTE: Remove once deprecation period passes -@set_module("numpy") -def row_stack(tup, *, dtype=None, casting="same_kind"): - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`row_stack` alias is deprecated. " - "Use `np.vstack` directly.", - DeprecationWarning, - stacklevel=2 - ) - return vstack(tup, dtype=dtype, casting=casting) - - -row_stack.__doc__ = vstack.__doc__ - - def _column_stack_dispatcher(tup): return _arrays_for_stack_dispatcher(tup) @@ -1049,30 +1031,6 @@ def dsplit(ary, indices_or_sections): return split(ary, indices_or_sections, 2) -def get_array_wrap(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None. - - .. 
deprecated:: 2.0 - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`get_array_wrap` is deprecated. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_wrap__) for i, x in enumerate(args) - if hasattr(x, '__array_wrap__')) - if wrappers: - return wrappers[-1][-1] - return None - - def _kron_dispatcher(a, b): return (a, b) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 352f57dd810a..565f48f862d9 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,45 +1,35 @@ +from _typeshed import Incomplete from collections.abc import Callable, Sequence from typing import ( Any, Concatenate, - ParamSpec, + Never, Protocol, + Self, SupportsIndex, - TypeVar, overload, type_check_only, ) -from typing_extensions import deprecated import numpy as np -from numpy import ( - _CastingKind, - complexfloating, - floating, - generic, - integer, - object_, - signedinteger, - ufunc, - unsignedinteger, -) from numpy._typing import ( ArrayLike, - DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, + _Shape, _ShapeLike, ) __all__ = [ "column_stack", - "row_stack", "dstack", "array_split", "split", @@ -55,16 +45,13 @@ __all__ = [ "put_along_axis", ] -_P = ParamSpec("_P") -_ScalarT = TypeVar("_ScalarT", bound=generic) - # Signature of `__array_wrap__` @type_check_only class _ArrayWrap(Protocol): def __call__( self, array: NDArray[Any], - context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + context: tuple[np.ufunc, tuple[Any, ...], int] | None = ..., return_scalar: bool = ..., /, ) -> Any: ... @@ -74,163 +61,196 @@ class _SupportsArrayWrap(Protocol): @property def __array_wrap__(self) -> _ArrayWrap: ... 
+# Protocol for array-like objects that preserve their type through split operations. +# Requires shape for size, ndim for dimensional checks in hsplit/vsplit/dsplit, +# swapaxes for axis manipulation, and __getitem__ for slicing. +@type_check_only +class _SupportsSplitOps(Protocol): + @property + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... + def __getitem__(self, key: Any, /) -> Self: ... + +type _JustAnyShape = tuple[Never, Never, Never] # workaround for microsoft/pyright#10232 + ### -def take_along_axis( - arr: _ScalarT | NDArray[_ScalarT], - indices: NDArray[integer], +def take_along_axis[ScalarT: np.generic]( + arr: ScalarT | NDArray[ScalarT], + indices: NDArray[np.integer], axis: int | None = -1, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... -def put_along_axis( - arr: NDArray[_ScalarT], - indices: NDArray[integer], +# +def put_along_axis[ScalarT: np.generic]( + arr: NDArray[ScalarT], + indices: NDArray[np.integer], values: ArrayLike, axis: int | None, ) -> None: ... +# @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], +def apply_along_axis[**Tss, ScalarT: np.generic]( + func1d: Callable[Concatenate[np.ndarray, Tss], _ArrayLike[ScalarT]], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, -) -> NDArray[_ScalarT]: ... + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], Any], +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[np.ndarray, Tss], Any], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> NDArray[Any]: ... 
-def apply_over_axes( - func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], +# +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[np.ndarray, int], NDArray[ScalarT]], a: ArrayLike, - axes: int | Sequence[int], -) -> NDArray[_ScalarT]: ... + axes: _ShapeLike, +) -> NDArray[ScalarT]: ... -@overload -def expand_dims( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike, -) -> NDArray[_ScalarT]: ... -@overload -def expand_dims( - a: ArrayLike, - axis: _ShapeLike, -) -> NDArray[Any]: ... - -# Deprecated in NumPy 2.0, 2023-08-18 -@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") -def row_stack( - tup: Sequence[ArrayLike], - *, - dtype: DTypeLike | None = None, - casting: _CastingKind = "same_kind", -) -> NDArray[Any]: ... +# +@overload # Nd -> Nd +def expand_dims[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + axis: tuple[()], +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # ?d -> ?d (workaround) +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[_JustAnyShape, DTypeT], + axis: int | tuple[int, ...], +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 0d -> 1d +def expand_dims[ScalarT: np.generic]( + a: ScalarT | np.ndarray[tuple[()], np.dtype[ScalarT]], + axis: int | tuple[int], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +@overload # 0d -> 2d +def expand_dims[ScalarT: np.generic]( + a: ScalarT | np.ndarray[tuple[()], np.dtype[ScalarT]], + axis: tuple[int, int], +) -> np.ndarray[tuple[int, int], np.dtype[ScalarT]]: ... +@overload # 1d -> 2d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int], DTypeT], + axis: int | tuple[int], +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 1d -> 3d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int], DTypeT], + axis: tuple[int, int], +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... 
+@overload # 2d -> 3d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int, int], DTypeT], + axis: int | tuple[int], +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # 2d -> 4d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int, int], DTypeT], + axis: tuple[int, int], +) -> np.ndarray[tuple[int, int, int, int], DTypeT]: ... +@overload # Nd -> ?d +def expand_dims[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], +) -> NDArray[ScalarT]: ... +@overload # fallback +def expand_dims(a: ArrayLike, axis: int | tuple[int, ...]) -> NDArray[Any]: ... # keep in sync with `numpy.ma.extras.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload -def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload -def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Incomplete]: ... +# @overload -def array_split( - ary: _ArrayLike[_ScalarT], +def array_split[SplitableT: _SupportsSplitOps]( + ary: SplitableT, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[_ScalarT]]: ... +) -> list[SplitableT]: ... @overload -def array_split( - ary: ArrayLike, +def array_split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[Any]]: ... +) -> list[NDArray[ScalarT]]: ... +@overload +def array_split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0) -> list[NDArray[Incomplete]]: ... 
+# @overload -def split( - ary: _ArrayLike[_ScalarT], +def split[SplitableT: _SupportsSplitOps]( + ary: SplitableT, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[_ScalarT]]: ... +) -> list[SplitableT]: ... @overload -def split( - ary: ArrayLike, +def split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[Any]]: ... +) -> list[NDArray[ScalarT]]: ... +@overload +def split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0) -> list[NDArray[Incomplete]]: ... # keep in sync with `numpy.ma.extras.hsplit` @overload -def hsplit( - ary: _ArrayLike[_ScalarT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +def hsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def hsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... - -@overload -def vsplit( - ary: _ArrayLike[_ScalarT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... @overload -def vsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# @overload -def dsplit( - ary: _ArrayLike[_ScalarT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +def vsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def dsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def vsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... 
+@overload +def vsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# +@overload +def dsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +def dsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... @overload -def get_array_wrap(*args: object) -> _ArrayWrap | None: ... +def dsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# @overload -def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload -def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[np.unsignedinteger]: ... @overload -def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[np.signedinteger]: ... @overload -def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc] +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.floating]: ... @overload -def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complexfloating]: ... @overload -def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +def kron(a: _ArrayLikeObject_co, b: object) -> NDArray[np.object_]: ... @overload -def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... +def kron(a: object, b: _ArrayLikeObject_co) -> NDArray[np.object_]: ... 
+# @overload -def tile( - A: _ArrayLike[_ScalarT], - reps: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +def tile[ScalarT: np.generic](A: _ArrayLike[ScalarT], reps: _ArrayLikeInt) -> NDArray[ScalarT]: ... @overload -def tile( - A: ArrayLike, - reps: int | Sequence[int], -) -> NDArray[Any]: ... +def tile(A: ArrayLike, reps: _ArrayLikeInt) -> NDArray[Incomplete]: ... diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 98a79b325f66..cee16fb9e7fc 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -7,8 +7,9 @@ import numpy as np from numpy._core.numeric import normalize_axis_tuple from numpy._core.overrides import array_function_dispatch, set_module +from numpy.lib._array_utils_impl import byte_bounds -__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] class DummyArray: @@ -35,7 +36,9 @@ def _maybe_view_as_subclass(original_array, new_array): @set_module("numpy.lib.stride_tricks") -def as_strided(x, shape=None, strides=None, subok=False, writeable=True): +def as_strided( + x, shape=None, strides=None, subok=False, writeable=True, *, check_bounds=None +): """ Create a view into the array with the given shape and strides. @@ -55,11 +58,20 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): If set to False, the returned array will always be readonly. Otherwise it will be writable if the original array was. It is advisable to set this to False if possible (see Notes). + check_bounds : bool or None + Check new stride and shape for potential out of bound memory + access. Returns ------- view : ndarray + Raises + ------ + ValueError + If `check_bounds` is True the given shape and strides could result in + out-of-bounds memory access. + See also -------- broadcast_to : broadcast an array to a given shape. 
@@ -69,7 +81,7 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): Notes ----- - ``as_strided`` creates a view into the array given the exact strides + `as_strided` creates a view into the array given the exact strides and shape. This means it manipulates the internal data structure of ndarray and, if done incorrectly, the array elements can point to invalid memory and can corrupt results or crash your program. @@ -87,27 +99,73 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): care, you may want to use ``writeable=False`` to avoid accidental write operations. - For these reasons it is advisable to avoid ``as_strided`` when + For these reasons it is advisable to avoid `as_strided` when possible. + + Examples + -------- + + >>> import numpy as np + ... from numpy.lib.stride_tricks import as_strided + ... x = np.arange(10) + ... y = as_strided(x, shape=(5,), strides=(8,), check_bounds=True) + ... y + array([0, 1, 2, 3, 4]) + + Attempting to create an out-of-bounds view and use ``check_bounds=True`` + as_strided will raises an error: + + >>> as_strided(x, shape=(20,), strides=(8,), check_bounds=True) + Traceback (most recent call last): + ... + ValueError: Given shape and strides would access memory out of bounds... + + When working with views, bounds are checked against the base array: + + >>> a = np.arange(1000) + ... b = a[:2] + ... c = as_strided(b, shape=(2,), strides=(400,), check_bounds=True) + ... 
c[0], c[1] + (0, 50) """ + # first convert input to array, possibly keeping subclass - x = np.array(x, copy=None, subok=subok) - interface = dict(x.__array_interface__) + base = np.array(x, copy=None, subok=subok) + interface = dict(base.__array_interface__) if shape is not None: interface['shape'] = tuple(shape) if strides is not None: interface['strides'] = tuple(strides) - array = np.asarray(DummyArray(interface, base=x)) + array = np.asarray(DummyArray(interface, base=base)) # The route via `__interface__` does not preserve structured # dtypes. Since dtype should remain unchanged, we set it explicitly. - array.dtype = x.dtype + array._set_dtype(base.dtype) - view = _maybe_view_as_subclass(x, array) + view = _maybe_view_as_subclass(base, array) if view.flags.writeable and not writeable: view.flags.writeable = False + if check_bounds: + while isinstance(base.base, np.ndarray): + base = base.base + + base_low, base_high = byte_bounds(base) + view_low, view_high = byte_bounds(view) + + if view_low < base_low: + raise ValueError( + f"Given shape and strides would access memory out of bounds. " + f"View starts {base_low - view_low} bytes before lowest address" + ) + + if view_high > base_high: + raise ValueError( + f"Given shape and strides would access memory out of bounds. " + f"View ends {view_high - base_high} bytes after highest address" + ) + return view @@ -168,7 +226,8 @@ def sliding_window_view(x, window_shape, axis=None, *, See Also -------- lib.stride_tricks.as_strided: A lower-level and less safe routine for - creating arbitrary views from custom shape and strides. + creating arbitrary views from custom shape and strides. Use the + ``check_bounds`` parameter for bounds validation. broadcast_to: broadcast an array to a given shape. 
Notes @@ -305,6 +364,21 @@ def sliding_window_view(x, window_shape, axis=None, *, >>> moving_average array([1., 2., 3., 4.]) + To adjust the step size of the sliding window, index the output view along + the desired dimension(s). Using the array shown above: + + >>> v[::2] + array([[0, 1, 2], + [2, 3, 4]]) + + You can slide in the reverse direction using the same technique: + + >>> v[::-1] + array([[3, 4, 5], + [2, 3, 4], + [1, 2, 3], + [0, 1, 2]]) + The two examples below demonstrate the effect of ``writeable=True``. Creating a view with the default ``writeable=False`` and then writing to diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 008f2d544414..efa3c7bb4e63 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,13 +1,11 @@ from collections.abc import Iterable -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, overload -from numpy import generic +import numpy as np from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike __all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - class DummyArray: __array_interface__: dict[str, Any] base: NDArray[Any] | None @@ -18,13 +16,15 @@ class DummyArray: ) -> None: ... @overload -def as_strided( - x: _ArrayLike[_ScalarT], +def as_strided[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], shape: Iterable[int] | None = None, strides: Iterable[int] | None = None, subok: bool = False, writeable: bool = True, -) -> NDArray[_ScalarT]: ... + *, + check_bounds: bool | None = None +) -> NDArray[ScalarT]: ... @overload def as_strided( x: ArrayLike, @@ -32,33 +32,35 @@ def as_strided( strides: Iterable[int] | None = None, subok: bool = False, writeable: bool = True, + *, + check_bounds: bool | None = None ) -> NDArray[Any]: ... 
@overload -def sliding_window_view( - x: _ArrayLike[_ScalarT], +def sliding_window_view[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], window_shape: int | Iterable[int], - axis: SupportsIndex | None = None, + axis: int | tuple[int, ...] | None = None, *, subok: bool = False, writeable: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def sliding_window_view( x: ArrayLike, window_shape: int | Iterable[int], - axis: SupportsIndex | None = None, + axis: int | tuple[int, ...] | None = None, *, subok: bool = False, writeable: bool = False, ) -> NDArray[Any]: ... @overload -def broadcast_to( - array: _ArrayLike[_ScalarT], +def broadcast_to[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], shape: int | Iterable[int], subok: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def broadcast_to( array: ArrayLike, diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index bad797a970c9..2426134c4083 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -3,6 +3,8 @@ """ import functools import operator +import os +import warnings from numpy._core import iinfo, overrides from numpy._core._multiarray_umath import _array_converter @@ -217,7 +219,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): Examples -------- >>> import numpy as np - >>> np.eye(2, dtype=int) + >>> np.eye(2, dtype=np.int_) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) @@ -385,7 +387,6 @@ def diagflat(v, k=0): return conv.wrap(res) - @finalize_array_function_like @set_module('numpy') def tri(N, M=None, k=0, dtype=float, *, like=None): @@ -418,7 +419,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- >>> import numpy as np - >>> np.tri(3, 5, 2, dtype=int) + >>> np.tri(3, 5, 2, dtype=np.int_) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) @@ -429,11 +430,28 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): [1., 1., 0., 0., 0.]]) 
""" + + warning_for_type = None + try: + N = operator.index(N) + except TypeError: + warning_for_type = warning_for_type or type(N) + if like is not None: return _tri_with_like(like, N, M=M, k=k, dtype=dtype) if M is None: M = N + else: + try: + M = operator.index(M) + except TypeError: + warning_for_type = warning_for_type or type(M) + + try: + k = operator.index(k) + except TypeError: + warning_for_type = warning_for_type or type(k) m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), arange(-k, M - k, dtype=_min_int(-k, M - k))) @@ -441,6 +459,15 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): # Avoid making a copy if the requested type is already bool m = m.astype(dtype, copy=False) + # Deprecation in NumPy 2.5, 2026-03 + if warning_for_type: + warnings.warn( + (f"Cannot convert {(warning_for_type).__name__} safely to an integer." + "This will raise an error in future versions (Deprecated NumPy 2.5)"), + DeprecationWarning, + skip_file_prefixes=(os.path.dirname(__file__),), + ) + return m @@ -1132,6 +1159,20 @@ def triu_indices(n, k=0, m=None): [ 12, 13, 14, -1]]) """ + + try: + k = operator.index(k) + except TypeError: + # If same instance,then warning will be given in tri + if not isinstance(k, type(k - 1)): + # Deprecated in NumPy 2.5, 2026-03 + warnings.warn( + (f"Cannot convert {type(k).__name__} safely to an integer." 
+ "This will raise an error in future versions (Deprecated NumPy 2.5)"), + DeprecationWarning, + skip_file_prefixes=(os.path.dirname(__file__),), + ) + tri_ = ~tri(n, m, k=k - 1, dtype=bool) return tuple(broadcast_to(inds, tri_.shape)[tri_] diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 58582119429a..af8e3d72c4d8 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,15 +1,6 @@ from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import ( - Any, - Literal as L, - Never, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Literal as L, Never, Protocol, overload, type_check_only import numpy as np from numpy import _OrderCF @@ -45,37 +36,28 @@ __all__ = [ ### -_T = TypeVar("_T") -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ComplexT = TypeVar("_ComplexT", bound=np.complexfloating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_NumberT = TypeVar("_NumberT", bound=np.number) -_NumberObjectT = TypeVar("_NumberObjectT", bound=np.number | np.object_) -_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) - -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] # Workaround for mypy's and pyright's lack of compliance with the typing spec for # overloads for gradual types. This works because only `Any` and `Never` are assignable # to `Never`. 
-_ArrayNoD: TypeAlias = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[_ScalarT]] +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] -_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike2DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] +type _MaskFunc[_T] = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] -_Indices2D: TypeAlias = tuple[_Array1D[np.intp], _Array1D[np.intp]] -_Histogram2D: TypeAlias = tuple[_Array1D[np.float64], _Array1D[_ScalarT], _Array1D[_ScalarT]] +type _Indices2D = tuple[_Array1D[np.intp], _Array1D[np.intp]] +type _Histogram2D[ScalarT: np.generic] = tuple[_Array2D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] @type_check_only class _HasShapeAndNDim(Protocol): @@ -88,17 +70,17 @@ class _HasShapeAndNDim(Protocol): # keep in sync with `flipud` @overload -def fliplr(m: _ArrayT) -> _ArrayT: ... 
+def fliplr[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def fliplr[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... # keep in sync with `fliplr` @overload -def flipud(m: _ArrayT) -> _ArrayT: ... +def flipud[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def flipud[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... @@ -115,27 +97,27 @@ def eye( like: _SupportsArrayFunc | None = None, ) -> _Array2D[np.float64]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None = None, k: int = 0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderCF = "C", device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload def eye( N: int, @@ -150,23 +132,23 @@ def eye( # @overload -def diag(v: _ArrayNoD[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> NDArray[ScalarT]: ... @overload -def diag(v: _Array2D[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> _Array1D[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Array1D[ScalarT]: ... @overload -def diag(v: _Array1D[_ScalarT] | Sequence[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... 
+def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... @overload def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... @overload -def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... @@ -181,23 +163,23 @@ def tri( like: _SupportsArrayFunc | None = None ) -> _Array2D[np.float64]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None = None, k: int = 0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], like: _SupportsArrayFunc | None = None -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload def tri( N: int, @@ -210,23 +192,24 @@ def tri( # keep in sync with `triu` @overload -def tril(m: _ArrayT, k: int = 0) -> _ArrayT: ... +def tril[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def tril[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... # keep in sync with `tril` @overload -def triu(m: _ArrayT, k: int = 0) -> _ArrayT: ... 
+def triu[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def triu[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... # we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap +# keep in sync with `ma.extras.vander` @overload -def vander(x: _ArrayLike1D[_NumberObjectT], N: int | None = None, increasing: bool = False) -> _Array2D[_NumberObjectT]: ... +def vander[ScalarT: np.number | np.object_](x: _ArrayLike1D[ScalarT], N: int | None = None, increasing: bool = False) -> _Array2D[ScalarT]: ... @overload def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... @overload @@ -238,41 +221,41 @@ def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = # @overload -def histogram2d( - x: _ArrayLike1D[_ComplexT], - y: _ArrayLike1D[_ComplexT | _Float_co], +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Float_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_ComplexT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_ComplexT | _Float_co], - y: _ArrayLike1D[_ComplexT], +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT | _Float_co], + y: _ArrayLike1D[ScalarT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_ComplexT]: ... +) -> _Histogram2D[ScalarT]: ... 
@overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT | _Int_co], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Int_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT | _Int_co], - y: _ArrayLike1D[_InexactT], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT | _Int_co], + y: _ArrayLike1D[ScalarT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT]: ... +) -> _Histogram2D[ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -292,41 +275,41 @@ def histogram2d( weights: _ArrayLike1DFloat_co | None = None, ) -> _Histogram2D[np.complex128 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, - bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], + bins: _ArrayLike1D[ScalarT] | Sequence[_ArrayLike1D[ScalarT]], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_NumberCoT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], +def histogram2d[ScalarT: np.inexact, BinsScalarT: _Number_co]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1D[BinsScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT | _NumberCoT]: ... +) -> _Histogram2D[ScalarT | BinsScalarT]: ... 
@overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], bins: Sequence[_ArrayLike1DNumber_co | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT | Any]: ... +) -> _Histogram2D[ScalarT | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + bins: Sequence[_ArrayLike1D[ScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[np.float64 | _NumberCoT]: ... +) -> _Histogram2D[np.float64 | ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -337,14 +320,14 @@ def histogram2d( weights: _ArrayLike1DFloat_co | None = None, ) -> _Histogram2D[np.float64 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: Sequence[complex], y: Sequence[complex], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + bins: Sequence[_ArrayLike1D[ScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[np.complex128 | _NumberCoT]: ... +) -> _Histogram2D[np.complex128 | ScalarT]: ... @overload def histogram2d( x: Sequence[complex], @@ -397,7 +380,7 @@ def histogram2d( @overload def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... @overload -def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> _Indices2D: ... +def mask_indices[T](n: int, mask_func: _MaskFunc[T], k: T) -> _Indices2D: ... # def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... 
diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 584088cdc21d..61b5b7229eb7 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -2,6 +2,7 @@ """ import functools +import warnings __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', 'isreal', 'nan_to_num', 'real', 'real_if_close', @@ -240,28 +241,22 @@ def isreal(x): Examples -------- >>> import numpy as np - >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=np.complex128) >>> np.isreal(a) array([False, True, True, True, True, False]) The function does not work on string arrays. - >>> a = np.array([2j, "a"], dtype="U") - >>> np.isreal(a) # Warns about non-elementwise comparison - False + >>> a = np.array([2j, "a"], dtype=np.str_) + >>> np.isreal(a) # returns the result of `"" == 0` currently. + array([False, False]) - Returns True for all elements in input array of ``dtype=object`` even if - any of the elements is complex. + Returns True for all elements that either have no ``.imag`` attribute + or for which that attribute is zero: - >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> a = np.array([1, "2", 3+4j], dtype=np.object_) >>> np.isreal(a) - array([ True, True, True]) - - isreal should not be used with object arrays - - >>> a = np.array([1+2j, 2+1j], dtype=object) - >>> np.isreal(a) - array([ True, True]) + array([ True, True, False]) """ return imag(x) == 0 @@ -587,6 +582,9 @@ def typename(char): """ Return a description for the given data type code. + .. deprecated:: 2.5 + `numpy.typename` is deprecated. Use `numpy.dtype.name` instead. + Parameters ---------- char : str @@ -633,6 +631,12 @@ def typename(char): q : long long integer """ + # Deprecated in NumPy 2.5, 2026-02-03 + warnings.warn( + "numpy.typename is deprecated. 
Use numpy.dtype.name instead.", + DeprecationWarning, + stacklevel=2 + ) return _namefromtype[char] #----------------------------------------------------------------------------- diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 8b665cd9a400..29c78c116117 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,7 +1,6 @@ -from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only -from typing_extensions import TypeVar +from typing import Any, Literal as L, Protocol, overload, type_check_only +from typing_extensions import deprecated import numpy as np from numpy._typing import ( @@ -10,9 +9,11 @@ from numpy._typing import ( _16Bit, _32Bit, _64Bit, + _AnyShape, _ArrayLike, _NestedSequence, _ScalarLike_co, + _Shape, _SupportsArray, ) @@ -30,33 +31,28 @@ __all__ = [ "typename", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) -_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool) - -_FloatMax32: TypeAlias = np.float32 | np.float16 -_ComplexMax128: TypeAlias = np.complex128 | np.complex64 -_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer -_Real: TypeAlias = np.floating | np.integer -_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 -_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer +type _FloatMax32 = np.float32 | np.float16 +type _ComplexMax128 = np.complex128 | np.complex64 +type _RealMax64 = np.float64 | np.float32 | np.float16 | np.integer +type _Real = np.floating | np.integer +type _ToReal = _Real | np.bool +type _InexactMax32 = np.inexact[_32Bit] | np.float16 +type _NumberMax64 = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer @type_check_only 
-class _HasReal(Protocol[_T_co]): +class _HasReal[T](Protocol): @property - def real(self, /) -> _T_co: ... + def real(self, /) -> T: ... @type_check_only -class _HasImag(Protocol[_T_co]): +class _HasImag[T](Protocol): @property - def imag(self, /) -> _T_co: ... + def imag(self, /) -> T: ... @type_check_only -class _HasDType(Protocol[_ScalarT_co]): +class _HasDType[ScalarT: np.generic](Protocol): @property - def dtype(self, /) -> np.dtype[_ScalarT_co]: ... + def dtype(self, /) -> np.dtype[ScalarT]: ... ### @@ -64,17 +60,17 @@ def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[s # @overload -def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +def real[T](val: _HasReal[T]) -> T: ... @overload -def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +def real[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def real(val: ArrayLike) -> NDArray[Any]: ... # @overload -def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +def imag[T](val: _HasImag[T]) -> T: ... @overload -def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +def imag[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def imag(val: ArrayLike) -> NDArray[Any]: ... @@ -99,144 +95,250 @@ def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ... def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... # -@overload +@overload # np.generic | np.ndarray (`ndarray` subclasses pass through) +def nan_to_num[ScalarOrArrayT: np.generic | np.ndarray]( + x: ScalarOrArrayT, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> ScalarOrArrayT: ... +@overload # >0-d +def nan_to_num[ScalarT: np.generic]( + x: _NestedSequence[_ArrayLike[ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[ScalarT]: ... 
+@overload # ?-d +def nan_to_num[DTypeT: np.dtype]( + x: _SupportsArray[DTypeT], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> np.ndarray[_AnyShape, DTypeT] | Any: ... +@overload # 0-d ~bool def nan_to_num( - x: _ScalarT, + x: bool, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> _ScalarT: ... -@overload +) -> np.bool: ... +@overload # 0-d +int def nan_to_num( - x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]], + x: int, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> NDArray[_ScalarT]: ... -@overload +) -> np.int_ | Any: ... +@overload # 0-d +float def nan_to_num( - x: _SupportsArray[np.dtype[_ScalarT]], + x: float, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> _ScalarT | NDArray[_ScalarT]: ... -@overload +) -> np.float64 | Any: ... +@overload # 0-d +complex +def nan_to_num( + x: complex, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> np.complex128 | Any: ... +@overload # >0-d ~bool +def nan_to_num( + x: _NestedSequence[bool], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.bool]: ... +@overload # >0-d ~int +def nan_to_num( + x: _NestedSequence[list[int]] | list[int], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.int_]: ... +@overload # >0-d ~float +def nan_to_num( + x: _NestedSequence[list[float]] | list[float], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.float64]: ... 
+@overload # >0-d ~complex +def nan_to_num( + x: _NestedSequence[list[complex]] | list[complex], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.complex128]: ... +@overload # >0-d def nan_to_num( x: _NestedSequence[ArrayLike], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> NDArray[Incomplete]: ... -@overload +) -> np.ndarray: ... +@overload # ?-d def nan_to_num( x: ArrayLike, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> Incomplete: ... +) -> np.ndarray | Any: ... -# NOTE: The [overload-overlap] mypy error is a false positive +# +@overload +def real_if_close[ShapeT: _Shape, DTypeT: np.dtype[_ToReal]]( + a: np.ndarray[ShapeT, DTypeT], + tol: float = 100, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def real_if_close[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + tol: float = 100, +) -> np.ndarray[ShapeT, np.dtype[np.float32 | np.complex64]]: ... +@overload +def real_if_close[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128]], + tol: float = 100, +) -> np.ndarray[ShapeT, np.dtype[np.float64 | np.complex128]]: ... @overload -def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +def real_if_close[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + tol: float = 100, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble | np.clongdouble]]: ... +@overload +def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ... +@overload +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... @overload def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... 
@overload def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... @overload -def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ... -@overload def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... # @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["S1"]) -> L["character"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["?"]) -> L["bool"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["b"]) -> L["signed char"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["B"]) -> L["unsigned char"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["h"]) -> L["short"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["H"]) -> L["unsigned short"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["i"]) -> L["integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["I"]) -> L["unsigned integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["l"]) -> L["long integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["L"]) -> L["unsigned long integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["q"]) -> L["long long integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... 
@overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["f"]) -> L["single precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["d"]) -> L["double precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["g"]) -> L["long precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["F"]) -> L["complex single precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["D"]) -> L["complex double precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["G"]) -> L["complex long double precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["S"]) -> L["string"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["U"]) -> L["unicode"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["V"]) -> L["void"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["O"]) -> L["object"]: ... # NOTE: The [overload-overlap] mypy errors are false positives @overload def common_type() -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... 
# type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.float64 | np.integer], /, *ai: _HasDType[_RealMax64], ) -> type[np.float64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.longdouble], /, *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex64], /, *ai: _HasDType[_InexactMax32], ) -> type[np.complex64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex128], /, *ai: _HasDType[_NumberMax64], ) -> type[np.complex128]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.clongdouble], /, *ai: _HasDType[np.number], ) -> type[np.clongdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_FloatMax32], array1: _HasDType[np.float32], /, @@ -257,7 +359,7 @@ def common_type( *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_InexactMax32], array1: _HasDType[np.complex64], /, diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 569840697d81..0f503d03a556 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -5,6 +5,8 @@ """ __all__ = ['fix', 'isneginf', 'isposinf'] +import warnings + import numpy._core.numeric as nx from numpy._core.overrides import array_function_dispatch @@ -18,6 +20,10 @@ def fix(x, out=None): """ Round to nearest integer towards zero. + .. deprecated:: 2.5 + `numpy.fix` is deprecated. Use `numpy.trunc` instead, + which is faster and follows the Array API standard. 
+ Round an array of floats element-wise to nearest integer towards zero. The rounded values have the same data-type as the input. @@ -56,6 +62,13 @@ def fix(x, out=None): array([ 2., 2., -2., -2.]) """ + # Deprecated in NumPy 2.5, 2026-01-12 + warnings.warn( + "numpy.fix is deprecated. Use numpy.trunc instead, " + "which is faster and follows the Array API standard.", + DeprecationWarning, + stacklevel=2, + ) return nx.trunc(x, out=out) diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index 0f00767356e0..4145ff205e1b 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -1,8 +1,7 @@ -from typing import Any, TypeVar, overload +from typing import overload from typing_extensions import deprecated import numpy as np -from numpy import floating, object_ from numpy._typing import ( NDArray, _ArrayLikeFloat_co, @@ -12,49 +11,31 @@ from numpy._typing import ( __all__ = ["fix", "isneginf", "isposinf"] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _FloatLike_co, out: None = None) -> floating: ... +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") +def fix(x: _FloatLike_co, out: None = None) -> np.floating: ... @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[floating]: ... +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.floating]: ... @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... +@deprecated("numpy.fix is deprecated. 
Use numpy.trunc instead.") +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[np.object_]: ... @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") +def fix[ArrayT: np.ndarray](x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: ArrayT) -> ArrayT: ... +# @overload -def isposinf( # type: ignore[misc] - x: _FloatLike_co, - out: None = None, -) -> np.bool: ... +def isposinf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[np.bool]: ... +def isposinf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isposinf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... +# @overload -def isneginf( # type: ignore[misc] - x: _FloatLike_co, - out: None = None, -) -> np.bool: ... +def isneginf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[np.bool]: ... +def isneginf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isneginf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... 
diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 7f364b495450..0910e10dbde2 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from types import EllipsisType -from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload -from typing_extensions import TypeVar, deprecated, override +from typing import Any, Generic, Self, SupportsIndex, overload, override +from typing_extensions import TypeVar, deprecated import numpy as np import numpy.typing as npt @@ -11,30 +11,25 @@ from numpy._typing import ( _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike, + _Shape, ) ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) -_RealContainerT = TypeVar( - "_RealContainerT", - bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], -) -_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) +type _ArrayInt_co = npt.NDArray[np.integer | np.bool] -_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] +type _BoolContainer = container[Any, np.dtype[np.bool]] # type: ignore[deprecated] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] # type: ignore[deprecated] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] # type: ignore[deprecated] +type _NumericContainer = container[Any, 
np.dtype[np.number | np.timedelta64 | np.object_]] # type: ignore[deprecated] -_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None -_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] -_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None +type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] +type _ToIndex = SupportsIndex | _ToIndexSlice +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] ### # pyright: reportDeprecated = false @@ -52,19 +47,19 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, data: npt.ArrayLike, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = True, ) -> None: ... @overload @@ -112,20 +107,28 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with np.ndarray @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex64]], / + ) -> container[ShapeT, np.dtype[np.float32]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex128]], / + ) -> container[ShapeT, np.dtype[np.float64]]: ... 
@overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex192]], / + ) -> container[ShapeT, np.dtype[np.float96]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex256]], / + ) -> container[ShapeT, np.dtype[np.float128]]: ... @overload - def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + def __abs__[ContainerT: _RealContainer](self: ContainerT, /) -> ContainerT: ... # - def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... + def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... + def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... # TODO(jorenham): complete these binary ops @@ -170,40 +173,34 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __and__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __and__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rand__ = __and__ @overload - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... 
# @overload - def __xor__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __xor__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rxor__ = __xor__ @overload - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __or__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __or__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __ror__ = __or__ @overload - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ior__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... @@ -211,16 +208,18 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, t: DTypeT) -> np.ndarray[_ShapeT_co, DTypeT]: ... # @overload def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... @overload - def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... 
+ def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, a: np.ndarray[ShapeT, DTypeT], c: Any = ..., s: Any = ..., / + ) -> container[ShapeT, DTypeT]: ... # def copy(self, /) -> Self: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... - def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... + def astype[ScalarT: np.generic](self, /, typecode: _DTypeLike[ScalarT]) -> container[_ShapeT_co, np.dtype[ScalarT]]: ... diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 164aa4ee3d8c..6aa1065047ef 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -1,10 +1,7 @@ -import functools import os import platform import sys -import textwrap import types -import warnings import numpy as np from numpy._core import ndarray @@ -123,73 +120,6 @@ def get_include(): return d -class _Deprecate: - """ - Decorator class to deprecate old functions. - - Refer to `deprecate` for details. - - See Also - -------- - deprecate - - """ - - def __init__(self, old_name=None, new_name=None, message=None): - self.old_name = old_name - self.new_name = new_name - self.message = message - - def __call__(self, func, *args, **kwargs): - """ - Decorator call. Refer to ``decorate``. - - """ - old_name = self.old_name - new_name = self.new_name - message = self.message - - if old_name is None: - old_name = func.__name__ - if new_name is None: - depdoc = f"`{old_name}` is deprecated!" - else: - depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!" 
- - if message is not None: - depdoc += "\n" + message - - @functools.wraps(func) - def newfunc(*args, **kwds): - warnings.warn(depdoc, DeprecationWarning, stacklevel=2) - return func(*args, **kwds) - - newfunc.__name__ = old_name - doc = func.__doc__ - if doc is None: - doc = depdoc - else: - lines = doc.expandtabs().split('\n') - indent = _get_indent(lines[1:]) - if lines[0].lstrip(): - # Indent the original first line to let inspect.cleandoc() - # dedent the docstring despite the deprecation notice. - doc = indent * ' ' + doc - else: - # Remove the same leading blank lines as cleandoc() would. - skip = len(lines[0]) + 1 - for line in lines[1:]: - if len(line) > indent: - break - skip += len(line) + 1 - doc = doc[skip:] - depdoc = textwrap.indent(depdoc, ' ' * indent) - doc = f'{depdoc}\n\n{doc}' - newfunc.__doc__ = doc - - return newfunc - - def _get_indent(lines): """ Determines the leading whitespace that could be removed from all the lines. @@ -204,112 +134,6 @@ def _get_indent(lines): return indent -def deprecate(*args, **kwargs): - """ - Issues a DeprecationWarning, adds warning to `old_name`'s - docstring, rebinds ``old_name.__name__`` and returns the new - function object. - - This function may also be used as a decorator. - - .. deprecated:: 2.0 - Use `~warnings.warn` with :exc:`DeprecationWarning` instead. - - Parameters - ---------- - func : function - The function to be deprecated. - old_name : str, optional - The name of the function to be deprecated. Default is None, in - which case the name of `func` is used. - new_name : str, optional - The new name for the function. Default is None, in which case the - deprecation message is that `old_name` is deprecated. If given, the - deprecation message is that `old_name` is deprecated and `new_name` - should be used instead. - message : str, optional - Additional explanation of the deprecation. Displayed in the - docstring after the warning. 
- - Returns - ------- - old_func : function - The deprecated function. - - Examples - -------- - Note that ``olduint`` returns a value after printing Deprecation - Warning: - - >>> olduint = np.lib.utils.deprecate(np.uint) - DeprecationWarning: `uint64` is deprecated! # may vary - >>> olduint(6) - 6 - - """ - # Deprecate may be run as a function or as a decorator - # If run as a function, we initialise the decorator class - # and execute its __call__ method. - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`deprecate` is deprecated, " - "use `warn` with `DeprecationWarning` instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if args: - fn = args[0] - args = args[1:] - - return _Deprecate(*args, **kwargs)(fn) - else: - return _Deprecate(*args, **kwargs) - - -def deprecate_with_doc(msg): - """ - Deprecates a function and includes the deprecation in its docstring. - - .. deprecated:: 2.0 - Use `~warnings.warn` with :exc:`DeprecationWarning` instead. - - This function is used as a decorator. It returns an object that can be - used to issue a DeprecationWarning, by passing the to-be decorated - function as argument, this adds warning to the to-be decorated function's - docstring and returns the new function object. - - See Also - -------- - deprecate : Decorate a function such that it issues a - :exc:`DeprecationWarning` - - Parameters - ---------- - msg : str - Additional explanation of the deprecation. Displayed in the - docstring after the warning. - - Returns - ------- - obj : object - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`deprecate` is deprecated, " - "use `warn` with `DeprecationWarning` instead. 
" - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - return _Deprecate(message=msg) - - #----------------------------------------------------------------------------- @@ -524,7 +348,7 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): print(f"Help for {object} not found.", file=output) else: print("\n " - "*** Total of %d references found. ***" % numfound, + f"*** Total of {numfound} references found. ***", file=output ) @@ -580,73 +404,6 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): print(inspect.getdoc(object), file=output) -def safe_eval(source): - """ - Protected string evaluation. - - .. deprecated:: 2.0 - Use `ast.literal_eval` instead. - - Evaluate a string containing a Python literal expression without - allowing the execution of arbitrary non-literal code. - - .. warning:: - - This function is identical to :py:meth:`ast.literal_eval` and - has the same security implications. It may not always be safe - to evaluate large input strings. - - Parameters - ---------- - source : str - The string to evaluate. - - Returns - ------- - obj : object - The result of evaluating `source`. - - Raises - ------ - SyntaxError - If the code has invalid Python syntax, or if it contains - non-literal code. - - Examples - -------- - >>> np.safe_eval('1') - 1 - >>> np.safe_eval('[1, 2, 3]') - [1, 2, 3] - >>> np.safe_eval('{"foo": ("bar", 10.0)}') - {'foo': ('bar', 10.0)} - - >>> np.safe_eval('import os') - Traceback (most recent call last): - ... - SyntaxError: invalid syntax - - >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') - Traceback (most recent call last): - ... - ValueError: malformed node or string: <_ast.Call object at 0x...> - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`safe_eval` is deprecated. Use `ast.literal_eval` instead. 
" - "Be aware of security implications, such as memory exhaustion " - "based attacks (deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - # Local import to speed up numpy's import time. - import ast - return ast.literal_eval(source) - - def _median_nancheck(data, result, axis): """ Utility function to check median result from data for NaN values at the end diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index e73ba659a31c..87fbc3aa5c4c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,22 +1,18 @@ from _typeshed import SupportsWrite from typing import LiteralString -from typing_extensions import TypeVar import numpy as np __all__ = ["get_include", "info", "show_runtime"] -_ScalarOrArrayT = TypeVar("_ScalarOrArrayT", bound=np.generic | np.ndarray) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) - -### - def get_include() -> LiteralString: ... def show_runtime() -> None: ... def info( object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" ) -> None: ... -def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... +def drop_metadata[DTypeT: np.dtype](dtype: DTypeT, /) -> DTypeT: ... # used internally by `lib._function_base_impl._median` -def _median_nancheck(data: np.ndarray, result: _ScalarOrArrayT, axis: int) -> _ScalarOrArrayT: ... +def _median_nancheck[ScalarOrArrayT: np.generic | np.ndarray]( + data: np.ndarray, result: ScalarOrArrayT, axis: int +) -> ScalarOrArrayT: ... diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi index c53ef795f926..7d5b03f0cc25 100644 --- a/numpy/lib/_version.pyi +++ b/numpy/lib/_version.pyi @@ -1,17 +1,22 @@ +from typing import Final + __all__ = ["NumpyVersion"] class NumpyVersion: - vstring: str - version: str - major: int - minor: int - bugfix: int - pre_release: str - is_devversion: bool - def __init__(self, vstring: str) -> None: ... - def __lt__(self, other: str | NumpyVersion) -> bool: ... 
- def __le__(self, other: str | NumpyVersion) -> bool: ... - def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] - def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] - def __gt__(self, other: str | NumpyVersion) -> bool: ... - def __ge__(self, other: str | NumpyVersion) -> bool: ... + __module__ = "numpy.lib" + + vstring: Final[str] + version: Final[str] + major: Final[int] + minor: Final[int] + bugfix: Final[int] + pre_release: Final[str] + is_devversion: Final[bool] + + def __init__(self, /, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion, /) -> bool: ... + def __le__(self, other: str | NumpyVersion, /) -> bool: ... + def __eq__(self, other: str | NumpyVersion, /) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion, /) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion, /) -> bool: ... + def __ge__(self, other: str | NumpyVersion, /) -> bool: ... diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index c8a6dd818e96..8603f7b81a46 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -326,7 +326,7 @@ def _izip_records(seqarrays, fill_value=None, flatten=True): def _fix_output(output, usemask=True, asrecarray=False): """ - Private function: return a recarray, a ndarray, a MaskedArray + Private function: return a recarray, an ndarray, a MaskedArray or a MaskedRecords depending on the input parameters """ if not isinstance(output, ma.MaskedArray): @@ -1334,7 +1334,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. + `asrecarray==True`) or an ndarray. 
asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. @@ -1514,7 +1514,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. + `asrecarray==True`) or an ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. @@ -1532,9 +1532,9 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', # Check jointype if jointype not in ('inner', 'outer', 'leftouter'): raise ValueError( - "The 'jointype' argument should be in 'inner', " - "'outer' or 'leftouter' (got '%s' instead)" % jointype - ) + "The 'jointype' argument should be in 'inner', " + f"'outer' or 'leftouter' (got '{jointype}' instead)" + ) # If we have a single key, put it in a tuple if isinstance(key, str): key = (key,) diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 33713cf16331..3ba63bdb91dd 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -1,11 +1,11 @@ from _typeshed import Incomplete from collections.abc import Callable, Iterable, Mapping, Sequence -from typing import Any, Literal, TypeAlias, overload -from typing_extensions import TypeVar +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt -from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy import _CastingKind +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid, _Shape from numpy.ma.mrecords import MaskedRecords __all__ = [ @@ -32,26 +32,18 @@ __all__ = [ "unstructured_to_structured", ] -_T = TypeVar("_T") -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = 
TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) -_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) -_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) +type _OneOrMany[T] = T | Iterable[T] +type _BuiltinSequence[T] = tuple[T, ...] | list[T] -_OneOrMany: TypeAlias = _T | Iterable[_T] -_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] +type _NestedNames = tuple[str | _NestedNames, ...] +type _NonVoid = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +type _NonVoidDType = np.dtype[_NonVoid] | np.dtypes.StringDType -_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] -_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ -_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType - -_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] +type _JoinType = Literal["inner", "outer", "leftouter"] ### -def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... +def recursive_fill_fields[VoidArrayT: npt.NDArray[np.void]](input: npt.NDArray[np.void], output: VoidArrayT) -> VoidArrayT: ... # def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... @@ -59,7 +51,7 @@ def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... # @overload -def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +def flatten_descr[NonVoidDTypeT: _NonVoidDType](ndtype: NonVoidDTypeT) -> tuple[tuple[Literal[""], NonVoidDTypeT]]: ... @overload def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... 
@@ -72,13 +64,13 @@ def get_fieldstructure( # @overload -def merge_arrays( - seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], +def merge_arrays[ShapeT: _Shape]( + seqarrays: Sequence[np.ndarray[ShapeT, np.dtype]] | np.ndarray[ShapeT, np.dtype], fill_value: float = -1, flatten: bool = False, usemask: bool = False, asrecarray: bool = False, -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload def merge_arrays( seqarrays: Sequence[npt.ArrayLike] | np.void, @@ -90,64 +82,64 @@ def merge_arrays( # @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, *, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # @overload -def rename_fields( - base: MaskedRecords[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: MaskedRecords[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... 
@overload -def rename_fields( - base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.recarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.recarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -155,20 +147,20 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
@overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -176,30 +168,30 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, fill_value: int = -1, usemask: Literal[True] = True, asrecarray: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[True], asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... 
@overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -207,46 +199,46 @@ def append_fields( usemask: Literal[True] = True, *, asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... # -def rec_drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # -def rec_append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, # e.g. using a `TypeVar` with constraints. # https://github.com/numpy/numtype/issues/92 @overload -def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +def repack_fields[DTypeT: np.dtype](a: DTypeT, align: bool = False, recurse: bool = False) -> DTypeT: ... @overload -def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +def repack_fields[ScalarT: np.generic](a: ScalarT, align: bool = False, recurse: bool = False) -> ScalarT: ... @overload -def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... +def repack_fields[ArrayT: np.ndarray](a: ArrayT, align: bool = False, recurse: bool = False) -> ArrayT: ... 
# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) @overload -def structured_to_unstructured( +def structured_to_unstructured[ScalarT: np.generic]( arr: npt.NDArray[np.void], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, - casting: np._CastingKind = "unsafe", -) -> npt.NDArray[_ScalarT]: ... + casting: _CastingKind = "unsafe", +) -> npt.NDArray[ScalarT]: ... @overload def structured_to_unstructured( arr: npt.NDArray[np.void], dtype: npt.DTypeLike | None = None, copy: bool = False, - casting: np._CastingKind = "unsafe", + casting: _CastingKind = "unsafe", ) -> npt.NDArray[Any]: ... # @@ -280,29 +272,29 @@ def unstructured_to_structured( ) -> npt.NDArray[np.void]: ... # -def apply_along_fields( - func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], - arr: np.ndarray[_ShapeT, np.dtype[np.void]], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +def apply_along_fields[ShapeT: _Shape]( + func: Callable[[np.ndarray[ShapeT]], np.ndarray], + arr: np.ndarray[ShapeT, np.dtype[np.void]], +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... # -def require_fields( - array: np.ndarray[_ShapeT, np.dtype[np.void]], +def require_fields[ShapeT: _Shape]( + array: np.ndarray[ShapeT, np.dtype[np.void]], required_dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Attempt shape-typing @overload -def stack_arrays( - arrays: _ArrayT, +def stack_arrays[ArrayT: np.ndarray]( + arrays: ArrayT, defaults: Mapping[str, object] | None = None, usemask: bool = True, asrecarray: bool = False, autoconvert: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -357,27 +349,27 @@ def stack_arrays( # @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, return_index: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None, ignoremask: bool, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, *, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... # @overload diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 2dd19410bbf0..dece6823f09f 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -289,7 +289,7 @@ def test_RemoveHTTPFile(self, tmp_path): def test_CachedHTTPFile(self, tmp_path): localfile = valid_httpurl() - # Create a locally cached temp file with an URL based + # Create a locally cached temp file with a URL based # directory structure. This is similar to what Repository.open # would do. 
repos = datasource.Repository(valid_baseurl(), tmp_path) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 4e8d503427de..4ecbeef953c4 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -637,46 +637,46 @@ def check_all(self, a, b, i1, i2, c, dt): msg = base_msg.format('values', dt) v = unique(a) assert_array_equal(v, b, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_index', dt) v, j = unique(a, True, False, False) assert_array_equal(v, b, msg) assert_array_equal(j, i1, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_inverse', dt) v, j = unique(a, False, True, False) assert_array_equal(v, b, msg) assert_array_equal(j, i2, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_counts', dt) v, j = unique(a, False, False, True) assert_array_equal(v, b, msg) assert_array_equal(j, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_index and return_inverse', dt) v, j1, j2 = unique(a, True, True, False) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_index and return_counts', dt) v, j1, j2 = unique(a, True, False, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_inverse and return_counts', dt) v, j1, j2 = unique(a, False, True, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i2, msg) assert_array_equal(j2, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format(('return_index, return_inverse ' 'and return_counts'), dt) @@ -685,9 +685,10 @@ def check_all(self, a, b, i1, i2, c, dt): assert_array_equal(j1, i1, msg) 
assert_array_equal(j2, i2, msg) assert_array_equal(j3, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) def get_types(self): + types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) @@ -695,6 +696,7 @@ def get_types(self): types.append('timedelta64[D]') return types + @pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test_unique_1d(self): a = [5, 7, 1, 2, 1, 5, 7] * 10 @@ -783,7 +785,7 @@ def test_unique_1d(self): assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - datetime64 - nat = np.datetime64('nat') + nat = np.datetime64('nat', 'D') a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] ua_idx = [2, 0, 1] @@ -795,7 +797,7 @@ def test_unique_1d(self): assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - timedelta - nat = np.timedelta64('nat') + nat = np.timedelta64('nat', 's') a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] ua_idx = [2, 0, 1] diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index c70e3d5ebd43..13429e988daf 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -72,8 +72,10 @@ >>> >>> >>> NbufferT = [ - ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', + ... ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', + ... ('OO', 7j), [[7.,5.],[7.,5.]], 9), ... ] >>> >>> @@ -110,7 +112,8 @@ >>> for arr in basic_arrays + record_arrays: ... f = BytesIO() - ... 
format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... # XXX: arr is not a dict, items gets called on it + ... format.write_array_header_1_0(f, arr) ... print(repr(f.getvalue())) ... "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" @@ -273,7 +276,7 @@ "\x16\x02{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" -''' +''' # noqa: E501 import os import sys import warnings @@ -285,7 +288,6 @@ from numpy.lib import format from numpy.testing import ( IS_64BIT, - IS_PYPY, IS_WASM, assert_, assert_array_equal, @@ -552,16 +554,6 @@ def test_load_padded_dtype(tmpdir, dt): assert_array_equal(arr, arr1) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988") -@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") -def test_python2_python3_interoperability(): - fname = 'win64python2.npy' - path = os.path.join(os.path.dirname(__file__), 'data', fname) - with pytest.warns(UserWarning, match="Reading.*this warning\\."): - data = np.load(path) - assert_array_equal(data, np.ones(2)) - - @pytest.mark.filterwarnings( "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_pickle_python2_python3(): @@ -685,7 +677,7 @@ def test_descr_to_dtype(dt): def test_version_2_0(): f = BytesIO() # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) format.write_array(f, d, version=(2, 0)) @@ -710,7 +702,7 @@ def 
test_version_2_0(): @pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly") def test_version_2_0_memmap(tmpdir): # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) tf1 = os.path.join(tmpdir, 'version2_01.npy') tf2 = os.path.join(tmpdir, 'version2_02.npy') @@ -953,7 +945,6 @@ def test_large_file_support(tmpdir): assert_array_equal(r, d) -@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) @@ -1035,8 +1026,6 @@ def test_header_growth_axis(): float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) ]}), ]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5b8b0adbdd0d..ac07c2388c3d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -497,11 +497,19 @@ class subclass(np.ndarray): assert_equal(type(np.average(a)), subclass) assert_equal(type(np.average(a, weights=w)), subclass) + # Ensure a possibly returned sum of weights is correct too. + ra, rw = np.average(a, weights=w, returned=True) + assert_equal(type(ra), subclass) + assert_equal(type(rw), subclass) + # Even if it needs to be broadcast. 
+ ra, rw = np.average(a, weights=w[0], axis=1, returned=True) + assert_equal(type(ra), subclass) + assert_equal(type(rw), subclass) def test_upcasting(self): - typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), + types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')] - for at, wt, rt in typs: + for at, wt, rt in types: a = np.array([[1, 2], [3, 4]], dtype=at) w = np.array([[1, 2], [3, 4]], dtype=wt) assert_equal(np.average(a, weights=w).dtype, np.dtype(rt)) @@ -2794,6 +2802,12 @@ def test_sparse(self): assert_array_equal(X, np.array([[1, 2, 3]])) assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + def test_always_tuple(self): + A = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True, copy=False) + B = meshgrid([], sparse=True, copy=False) + assert isinstance(A, tuple) + assert isinstance(B, tuple) + def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments # Regression test for issue #4755: @@ -3057,6 +3071,11 @@ def test_error_not_1d(self, vals): with assert_raises(ValueError): np.bincount(vals) + @pytest.mark.parametrize("vals", [[1.0], [1j], ["1"], [b"1"]]) + def test_error_not_int(self, vals): + with assert_raises(TypeError): + np.bincount(vals) + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) def test_gh_28354(self, dt): a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) @@ -3283,6 +3302,11 @@ def test_period(self): 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', 'midpoint'] +# Note: Technically, averaged_inverted_cdf and midpoint are not interpolated. +# but NumPy doesn't currently make a difference (at least w.r.t. to promotion). 
+interpolating_quantile_methods = [ + 'averaged_inverted_cdf', 'interpolated_inverted_cdf', 'hazen', 'weibull', + 'linear', 'median_unbiased', 'normal_unbiased', 'midpoint'] methods_supporting_weights = ["inverted_cdf"] @@ -3913,6 +3937,25 @@ def test_percentile_gh_29003_Fraction(self): assert z == one assert np.array(z).dtype == a.dtype + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [50, 10.0]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, np.float64(50), method=method) + assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.percentile(a, np.float32(50), method=method) + assert value.dtype == np.float32 + class TestQuantile: # most of this is already tested by TestPercentile @@ -4228,6 +4271,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + # axis is a tuple of all axes + q = np.quantile(y, alpha, weights=w, method=method, axis=(0, 1, 2)) + q_res = np.quantile(y, alpha, weights=w, method=method, axis=None) + assert_allclose(q, q_res) + + q = np.quantile(y, alpha, weights=w, method=method, axis=(1, 2)) + q_res = np.zeros(shape=(2,)) + for i in range(2): + q_res[i] = np.quantile(y[i], alpha, weights=w[i], method=method) + assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) def test_quantile_weights_min_max(self, method): # Test weighted quantile at 0 and 1 with leading and trailing 
zero @@ -4324,6 +4378,25 @@ def test_float16_gh_29003(self): assert value == q * 50_000 assert value.dtype == np.float16 + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [0.5, 1]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, np.float64(0.5), method=method) + assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.quantile(a, np.float32(0.5), method=method) + assert value.dtype == np.float32 + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 5ba634b9f612..e3ae359c7d16 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1,4 +1,3 @@ -import gc import gzip import locale import os @@ -23,11 +22,9 @@ from numpy.exceptions import VisibleDeprecationWarning from numpy.lib import _npyio_impl from numpy.lib._iotools import ConversionWarning, ConverterError -from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.ma.testutils import assert_equal from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, IS_WASM, assert_, assert_allclose, @@ -36,7 +33,6 @@ assert_no_warnings, assert_raises, assert_raises_regex, - break_cycles, tempdir, temppath, ) @@ -186,7 +182,7 @@ def test_record(self): @pytest.mark.slow def test_format_2_0(self): - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] a = 
np.ones(1000, dtype=dt) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', UserWarning) @@ -206,7 +202,7 @@ def roundtrip(self, *args, **kwargs): arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: for n, a in enumerate(arr): - reloaded = arr_reloaded['arr_%d' % n] + reloaded = arr_reloaded[f'arr_{n}'] assert_equal(a, reloaded) assert_equal(a.dtype, reloaded.dtype) assert_equal(a.flags.fnc, reloaded.flags.fnc) @@ -232,7 +228,6 @@ def test_load_non_npy(self): assert len(npz["test2"]) == 10 assert npz["metadata"] == b"Name: Test" - @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow @pytest.mark.thread_unsafe(reason="crashes with low memory") @@ -321,7 +316,6 @@ def test_not_closing_opened_fid(self): fp.seek(0) assert_(not fp.closed) - @pytest.mark.slow_pypy def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed # It might be a "weak" test since failed to get triggered on @@ -339,14 +333,7 @@ def test_closing_fid(self): # TODO: specify exact message warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): - try: - np.load(tmp)["data"] - except Exception as e: - msg = f"Failed to load data from a file: {e}" - raise AssertionError(msg) - finally: - if IS_PYPY: - gc.collect() + np.load(tmp)["data"] def test_closing_zipfile_after_load(self): # Check that zipfile owns file and can close it. This needs to @@ -624,7 +611,7 @@ def test_unicode_and_bytes_fmt(self, iotype): np.savetxt(s, a, fmt="%f") s.seek(0) if iotype is StringIO: - assert_equal(s.read(), "%f\n" % 1.) + assert_equal(s.read(), f"{1.:f}\n") else: assert_equal(s.read(), b"%f\n" % 1.) 
@@ -647,7 +634,7 @@ def check_large_zip(memoryerror_raised): except MemoryError: memoryerror_raised.value = True raise - # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # run in a subprocess to ensure memory is released # Use an object in shared memory to re-raise the MemoryError exception # in our process if needed, see gh-16889 memoryerror_raised = Value(c_bool) @@ -843,8 +830,6 @@ def test_comments_multiple(self): a = np.array([[1, 2, 3], [4, 5, 6]], int) assert_array_equal(x, a) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_comments_multi_chars(self): c = TextIO() c.write('/* comment\n1,2,3,5\n') @@ -1034,7 +1019,7 @@ def test_dtype_with_object(self): def test_uint64_type(self): tgt = (9223372043271415339, 9223372043271415853) c = TextIO() - c.write("%s %s" % tgt) + c.write(f'{tgt[0]} {tgt[1]}') c.seek(0) res = np.loadtxt(c, dtype=np.uint64) assert_equal(res, tgt) @@ -1042,7 +1027,7 @@ def test_uint64_type(self): def test_int64_type(self): tgt = (-9223372036854775807, 9223372036854775807) c = TextIO() - c.write("%s %s" % tgt) + c.write(f'{tgt[0]} {tgt[1]}') c.seek(0) res = np.loadtxt(c, dtype=np.int64) assert_equal(res, tgt) @@ -1061,8 +1046,6 @@ def test_from_float_hex(self): c, dtype=dt, converters=float.fromhex, encoding="latin1") assert_equal(res, tgt, err_msg=f"{dt}") - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_no_default_hex_conversion(self): """ Ensure that fromhex is only used for values with the correct prefix and @@ -1073,8 +1056,6 @@ def test_default_float_converter_no_default_hex_conversion(self): match=".*convert string 'a' to float64 at row 0, column 1"): np.loadtxt(c) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_exception(self): """ 
Ensure that the exception message raised during failed floating point @@ -1088,7 +1069,7 @@ def test_default_float_converter_exception(self): def test_from_complex(self): tgt = (complex(1, 1), complex(1, -1)) c = TextIO() - c.write("%s %s" % tgt) + c.write(f'{tgt[0]} {tgt[1]}') c.seek(0) res = np.loadtxt(c, dtype=complex) assert_equal(res, tgt) @@ -1186,7 +1167,7 @@ def test_ndmin_keyword(self): def test_generator_source(self): def count(): for i in range(10): - yield "%d" % i + yield f"{i}" res = np.loadtxt(count()) assert_array_equal(res, np.arange(10)) @@ -1288,20 +1269,16 @@ def test_max_rows_empty_lines(self, skip, data): if callable(data): data = data() - with pytest.warns(UserWarning, - match=f"Input line 3.*max_rows={3 - skip}"): - res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", - max_rows=3 - skip) - assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) + res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) if isinstance(data, StringIO): data.seek(0) - - with warnings.catch_warnings(): - warnings.simplefilter("error", UserWarning) - with pytest.raises(UserWarning): - np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", - max_rows=3 - skip) + # gh-31113 old test checked the warning twice on `StringIO` inputs + x = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + assert_array_equal(x, [[-1, 0], [1, 2], [3, 4]][skip:]) class Testfromregex: def test_record(self): @@ -1688,21 +1665,20 @@ def test_dtype_with_converters(self): control = np.array([2009., 23., 46],) assert_equal(test, control) - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") def test_dtype_with_converters_and_usecols(self): dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3} dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')] conv = {0: int, 1: int, 2: int, 3: lambda r: 
dmap[r.decode()]} - test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - names=None, converters=conv, encoding="bytes") + test = np.genfromtxt(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] - test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - usecols=(0, 1, 3), names=None, converters=conv, - encoding="bytes") + test = np.genfromtxt(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp) assert_equal(test, control) @@ -2339,69 +2315,6 @@ def test_utf8_file_nodtype_unicode(self): dtype=np.str_) assert_array_equal(test, ctl) - @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") - def test_recfromtxt(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} - test = recfromtxt(data, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = recfromtxt(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") - def test_recfromcsv(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True, - "encoding": "bytes"} - test = recfromcsv(data, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - 
assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = recfromcsv(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - # - data = TextIO('A,B\n0,1\n2,3') - test = recfromcsv(data, missing_values='N/A',) - control = np.array([(0, 1), (2, 3)], - dtype=[('a', int), ('b', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,3') - dtype = [('a', int), ('b', float)] - test = recfromcsv(data, missing_values='N/A', dtype=dtype) - control = np.array([(0, 1), (2, 3)], - dtype=dtype) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - # gh-10394 - data = TextIO('color\n"red"\n"blue"') - test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) - control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) - assert_equal(test.dtype, control.dtype) - assert_equal(test, control) - def test_max_rows(self): # Test the `max_rows` keyword argument. data = '1 2\n3 4\n5 6\n7 8\n9 10\n' @@ -2490,7 +2403,7 @@ def test_gft_using_generator(self): # gft doesn't work with unicode. def count(): for i in range(10): - yield asbytes("%d" % i) + yield asbytes(f"{i}") res = np.genfromtxt(count()) assert_array_equal(res, np.arange(10)) @@ -2512,9 +2425,9 @@ def test_auto_dtype_largeint(self): assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) - assert_(test.dtype['f0'] == float) - assert_(test.dtype['f1'] == np.int64) - assert_(test.dtype['f2'] == np.int_) + assert_(test.dtype['f0'].type is np.float64) + assert_(test.dtype['f1'].type is np.int64) + assert_(test.dtype['f2'].type is np.int_) assert_allclose(test['f0'], 73786976294838206464.) 
assert_equal(test['f1'], 17179869184) @@ -2613,9 +2526,6 @@ def test_save_load_memmap(self): assert_array_equal(data, a) # close the mem-mapped file del data - if IS_PYPY: - break_cycles() - break_cycles() @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") @pytest.mark.parametrize("filename_type", [Path, str]) @@ -2628,9 +2538,6 @@ def test_save_load_memmap_readwrite(self, filename_type): a[0][0] = 5 b[0][0] = 5 del b # closes the file - if IS_PYPY: - break_cycles() - break_cycles() data = np.load(path) assert_array_equal(data, a) @@ -2660,38 +2567,6 @@ def test_genfromtxt(self, filename_type): data = np.genfromtxt(path) assert_array_equal(a, data) - @pytest.mark.parametrize("filename_type", [Path, str]) - @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") - def test_recfromtxt(self, filename_type): - with temppath(suffix='.txt') as path: - path = filename_type(path) - with open(path, 'w') as f: - f.write('A,B\n0,1\n2,3') - - kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} - test = recfromtxt(path, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - @pytest.mark.parametrize("filename_type", [Path, str]) - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") - def test_recfromcsv(self, filename_type): - with temppath(suffix='.txt') as path: - path = filename_type(path) - with open(path, 'w') as f: - f.write('A,B\n0,1\n2,3') - - kwargs = { - "missing_values": "N/A", "names": True, "case_sensitive": True - } - test = recfromcsv(path, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - def test_gzip_load(): a = np.random.random((5, 5)) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 7a4ed17e7f07..dcac6f1a866d 100644 --- 
a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -13,7 +13,7 @@ import numpy as np from numpy.ma.testutils import assert_equal -from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_scientific_notation(): @@ -204,8 +204,6 @@ def test_maxrows_no_blank_lines(dtype): assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) def test_exception_message_bad_values(dtype): txt = StringIO("1,2\n3,XXX\n5,6") @@ -393,8 +391,6 @@ def test_bool(): assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_integer_signs(dtype): @@ -411,8 +407,6 @@ def test_integer_signs(dtype): np.loadtxt([f"{sign}2\n"], dtype=dtype) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_implicit_cast_float_to_int_fails(dtype): @@ -483,8 +477,6 @@ def conv(x): assert sys.getrefcount(sentinel) == 2 -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_character_not_bytes_compatible(): """Test exception when a character cannot be encoded as 'S'.""" data = StringIO("–") # == \u2013 @@ -502,8 +494,6 @@ def test_invalid_converter(conv): np.loadtxt(StringIO("1 2\n3 4"), converters=conv) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - 
reason="PyPy bug in error formatting") def test_converters_dict_raises_non_integer_key(): with pytest.raises(TypeError, match="keys of the converters dict"): np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) @@ -539,7 +529,7 @@ def test_quoted_field(q): @pytest.mark.parametrize("q", ('"', "'", "`")) -def test_quoted_field_with_whitepace_delimiter(q): +def test_quoted_field_with_whitespace_delimiter(q): txt = StringIO( f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" ) @@ -569,8 +559,6 @@ def test_quote_support_default(): assert_array_equal(res, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_quotechar_multichar_error(): txt = StringIO("1,2\n3,4") msg = r".*must be a single unicode character or None" @@ -684,9 +672,7 @@ def test_warn_on_skipped_data(skiprows): ("float16", 3.07e-05), ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j), ("float64", -1.758571353180402e-24), - # Here and below, the repr side-steps a small loss of precision in - # complex `str` in PyPy (which is probably fine, as repr works): - ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)), + ("complex128", 5.406409232372729e-29 - 1.758571353180402e-24j), # Use integer values that fit into double. Everything else leads to # problems due to longdoubles going via double and decimal strings # causing rounding errors. 
@@ -697,7 +683,7 @@ def test_warn_on_skipped_data(skiprows): def test_byteswapping_and_unaligned(dtype, value, swap): # Try to create "interesting" values within the valid unicode range: dtype = np.dtype(dtype) - data = [f"x,{value}\n"] # repr as PyPy `str` truncates some + data = [f"x,{value}\n"] if swap: dtype = dtype.newbyteorder() full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False) @@ -730,8 +716,6 @@ def test_unicode_whitespace_stripping_complex(dtype): assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", "FD") @pytest.mark.parametrize("field", ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) @@ -740,8 +724,6 @@ def test_bad_complex(dtype, field): np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_nul_character_error(dtype): @@ -753,8 +735,6 @@ def test_nul_character_error(dtype): np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_no_thousands_support(dtype): @@ -1022,8 +1002,6 @@ def test_str_dtype_unit_discovery_with_converter(): assert_equal(a, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_control_character_empty(): with pytest.raises(TypeError, match="Text reading control character must"): np.loadtxt(StringIO("1 2 3"), delimiter="") diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 6ef86bf84ee0..523623f10aae 
100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -835,7 +835,7 @@ def test_nanstd_with_mean_keyword(self): "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" ) -# All `inexact` + `timdelta64` type codes +# All `inexact` + `timedelta64` type codes _TYPE_CODES = list(np.typecodes["AllFloat"]) _TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] @@ -945,27 +945,37 @@ def test_result_values(self): @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): - mat = np.full((3, 3), np.nan).astype(dtype) - with pytest.warns(RuntimeWarning) as r: + mat = np.full((3, 3), np.nan, dtype=dtype) + with pytest.warns((RuntimeWarning, DeprecationWarning)) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() + _filtered_record = [ + item.message + for item in r + if "All-NaN slice encountered" in str(item.message) + ] if axis is None: - assert_(len(r) == 1) + assert_(len(_filtered_record) == 1) else: - assert_(len(r) == 3) + assert_(len(_filtered_record) == 3) # Check scalar - scalar = np.array(np.nan).astype(dtype)[()] + scalar = np.full((1, 1), np.nan, dtype=dtype)[0, 0] output_scalar = np.nanmedian(scalar) assert output_scalar.dtype == scalar.dtype assert np.isnan(output_scalar) + _filtered_record = [ + item.message + for item in r + if "All-NaN slice encountered" in str(item.message) + ] if axis is None: - assert_(len(r) == 2) + assert_(len(_filtered_record) == 2) else: - assert_(len(r) == 4) + assert_(len(_filtered_record) == 4) def test_empty(self): mat = np.zeros((0, 3)) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 32547f8e6c18..a388ab7bace5 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -143,6 +143,14 @@ def test_roots(self): # to take into account numerical calculation error 
assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + @pytest.mark.parametrize("dtyp", [int, np.float32, np.float64]) + def test_roots_dtype(self, dtyp): + coef = np.asarray([1, 0, -1], dtype=dtyp) # x**2 - 1 + r = np.roots(coef) + r.sort() + assert_allclose(r, np.asarray([-1, 1])) + assert r.dtype == {int: np.float64}.get(dtyp, dtyp) + def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) p[3] = 0 diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index fb654b4cfb85..c9a475b392c3 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -653,3 +653,231 @@ def test_reference_types(): actual, _ = broadcast_arrays(input_array, np.ones(3)) assert_array_equal(expected, actual) + + +@pytest.mark.parametrize( + "dtype", + [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float32, + np.float64, + np.complex64, + np.complex128, + ], +) +def test_as_strided_checked_different_dtypes(dtype): + """Test as_strided with check_bounds=True with different dtypes.""" + x = np.arange(10, dtype=dtype) + y = as_strided(x, shape=(5,), strides=(x.itemsize * 2,), check_bounds=True) + assert y.shape == (5,) + assert y.dtype == dtype + + +@pytest.mark.parametrize( + "size,view_size,stride_mult", + [ + (10, 5, 1), # Contiguous view + (10, 5, 2), # Every other element + (20, 10, 2), # Every other element + (100, 10, 10), # Every 10th element + ], +) +def test_as_strided_checked_1d_positive_strides(size, view_size, stride_mult): + """Test 1D arrays with positive strides.""" + x = np.arange(size, dtype=np.int64) + itemsize = x.itemsize + y = as_strided( + x, shape=(view_size,), strides=(itemsize * stride_mult,), check_bounds=True + ) + assert y.shape == (view_size,) + # Verify data correctness + expected = x[::stride_mult][:view_size] + assert_array_equal(y, expected) + + +@pytest.mark.parametrize( + "shape,window_shape", + [ + ((10,), (3,)), + ((20,), 
(5,)), + ((100,), (10,)), + ], +) +def test_as_strided_checked_sliding_window_1d(shape, window_shape): + """Test sliding window views in 1D.""" + x = np.arange(shape[0], dtype=np.int64) + itemsize = x.itemsize + n_windows = shape[0] - window_shape[0] + 1 + view_shape = (n_windows, window_shape[0]) + view_strides = (itemsize, itemsize) + + y = as_strided(x, shape=view_shape, strides=view_strides, check_bounds=True) + assert y.shape == view_shape + # Check first and last windows + assert_array_equal(y[0], x[: window_shape[0]]) + assert_array_equal(y[-1], x[-window_shape[0] :]) + + +@pytest.mark.parametrize( + "shape", + [ + (3, 4), + (5, 6), + (10, 10), + ], +) +def test_as_strided_checked_2d_default_strides(shape): + """Test 2D arrays with default strides.""" + x = np.arange(np.prod(shape), dtype=np.int64).reshape(shape) + y = as_strided(x, check_bounds=True) # Should use default shape and strides + assert_array_equal(y, x) + + +@pytest.mark.parametrize("size", [0, 1, 2, 10, 100]) +def test_as_strided_checked_zero_stride_broadcasting(size): + """Test zero strides (broadcasting a single value).""" + x = np.array([42], dtype=np.int64) + y = as_strided(x, shape=(size,), strides=(0,), check_bounds=True) + assert y.shape == (size,) + if size > 0: + assert_(np.all(y == 42)) + + +@pytest.mark.parametrize( + "size,shape,strides", + [ + # Strides too large + (10, (5,), (32,)), + (10, (10,), (16,)), + (20, (15,), (16,)), + # Shape too large for strides + (10, (20,), (8,)), + (10, (100,), (8,)), + # 2D out of bounds cases + (20, (5, 5), (80, 8)), + (20, (3, 10), (64, 8)), + # Negative strides that go before array start + (10, (5,), (-8,)), + (10, (10,), (-8,)), + (20, (5,), (-16,)), + # ND negative strides + (10, (2, 3, 4), (96, 32, -8)), + (20, (3, 4), (64, -8)), + (30, (2, 3, 4), (-96, 32, 8)), + ], +) +def test_as_strided_checked_out_of_bounds_positive_strides(size, shape, strides): + """Test that out-of-bounds positive strides raise ValueError.""" + x = np.arange(size, 
dtype=np.int64) + with pytest.raises(ValueError, match="out of bounds"): + as_strided(x, shape=shape, strides=strides, check_bounds=True) + + +def test_as_strided_checked_view_of_larger_array(): + """Test as_strided + + - with check_bounds=True + - considers the base array bounds, not just the view. + + """ + a = np.arange(1000, dtype=np.int64) + + b = a[:2] + + # This should succeed because the underlying array has enough memory + y = as_strided(b, shape=(2,), strides=(400,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 0) + assert_equal(y[1], 50) + + +def test_as_strided_checked_view_with_offset(): + """Test as_strided + + - with check_bounds=True + - on a view that doesn't start at the beginning. + """ + a = np.arange(1000, dtype=np.int64) + + b = a[100:102] + + y = as_strided(b, shape=(2,), strides=(80,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 100) + assert_equal(y[1], 110) + + +def test_as_strided_checked_view_out_of_bounds_negative(): + """Test that negative strides on a view correctly detect out of bounds.""" + a = np.arange(1000, dtype=np.int64) + + b = a[5:7] + + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(-48,), check_bounds=True) + + +def test_as_strided_checked_view_out_of_bounds_positive(): + """Test that positive strides on a view correctly detect out of bounds.""" + a = np.arange(100, dtype=np.int64) + + b = a[95:97] + + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(200,), check_bounds=True) + + +def test_as_strided_checked_nested_views(): + """Test as_strided with check_bounds=True on a view of a view.""" + a = np.arange(1000, dtype=np.int64) + b = a[10:100] + c = b[5:10] + + y = as_strided(c, shape=(2,), strides=(160,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 15) + assert_equal(y[1], 35) + + +def test_as_strided_checked_sliced_array(): + """Test various slicing 
scenarios.""" + a = np.arange(200, dtype=np.int64) + + b = a[10:20] + y = as_strided(b, shape=(5,), strides=(16,), check_bounds=True) + assert_equal(y.shape, (5,)) + + c = a[::2] + y = as_strided(c, shape=(10,), strides=(16,), check_bounds=True) + assert_equal(y.shape, (10,)) + + +@pytest.mark.parametrize( + "start,stop,stride_bytes,should_pass", + [ + (0, 10, 552, True), + (0, 10, 552 + 1, True), + (90, 95, 72, True), + (90, 95, 72 + 1, False), + (5, 7, -40, True), + (5, 7, -40 - 1, False), + ], +) +def test_as_strided_checked_view_parametrized(start, stop, stride_bytes, should_pass): + """Parametrized test for various view and stride combinations.""" + a = np.arange(100, dtype=np.int64) + b = a[start:stop] + + if should_pass: + y = as_strided(b, shape=(2,), strides=(stride_bytes,), check_bounds=True) + assert_equal(y.shape, (2,)) + else: + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(stride_bytes,), check_bounds=True) diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py index eb6aa69a443c..43ef45ad5d48 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -467,6 +467,7 @@ def test_triu_indices(self): iu2 = triu_indices(4, k=2) iu3 = triu_indices(4, m=5) iu4 = triu_indices(4, k=2, m=5) + iu5 = triu_indices(np.uint64(4), m=np.uint8(4)) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], @@ -510,6 +511,10 @@ def test_triu_indices(self): [11, 12, -1, -1, -10], [16, 17, 18, -1, -1]])) + # For unsigned integer + assert_array_equal(iu5, + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), + array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))) class TestTrilIndicesFrom: def test_exceptions(self): diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py index b4257ebf9191..8452a913c98c 100644 --- a/numpy/lib/tests/test_ufunclike.py +++ b/numpy/lib/tests/test_ufunclike.py @@ -1,3 +1,5 @@ +import pytest + import numpy as np from numpy import fix, isneginf, 
isposinf from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises @@ -35,6 +37,7 @@ def test_isneginf(self): with assert_raises(TypeError): isneginf(a) + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_fix(self): a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) out = np.zeros(a.shape, float) @@ -47,6 +50,7 @@ def test_fix(self): assert_equal(out, tgt) assert_equal(fix(3.14), 3) + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_fix_with_subclass(self): class MyArray(np.ndarray): def __new__(cls, data, metadata=None): @@ -79,6 +83,7 @@ def __array_finalize__(self, obj): assert_(isinstance(f0d, MyArray)) assert_equal(f0d.metadata, 'bar') + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_scalar(self): x = np.inf actual = np.isposinf(x) @@ -95,3 +100,22 @@ def test_scalar(self): out = np.array(0.0) actual = np.fix(x, out=out) assert_(actual is out) + + +class TestFixDeprecation: + """Test that numpy.fix emits a DeprecationWarning.""" + + def test_fix_emits_deprecation_warning(self): + a = np.array([1.5, 2.7, -1.5, -2.7]) + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(a) + + def test_fix_scalar_emits_deprecation_warning(self): + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(3.14) + + def test_fix_with_out_emits_deprecation_warning(self): + a = np.array([1.5, 2.7]) + out = np.zeros(a.shape) + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(a, out=out) diff --git a/numpy/lib/user_array.pyi b/numpy/lib/user_array.pyi index 9b90d893326b..af90126ad6c9 100644 --- a/numpy/lib/user_array.pyi +++ b/numpy/lib/user_array.pyi @@ -1 +1 @@ -from ._user_array_impl import container as container +from ._user_array_impl import container as container # type: ignore[deprecated] diff --git a/numpy/linalg/__init__.py 
b/numpy/linalg/__init__.py index cc482cfc9579..4e4c65758fa7 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -31,12 +31,12 @@ matrix_power tensordot matmul + outer Decompositions -------------- cholesky - outer qr svd svdvals diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 6884c9b7ef8d..f73e658dd82a 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -186,6 +186,19 @@ def _realType(t, default=double): def _complexType(t, default=cdouble): return _complex_types_map.get(t, default) + +def _to_real_if_imag_zero(w, t): + """Backwards compat helper: force w to be real if t.dtype is real and w.imag == 0 + """ + result_t = t.dtype.type + if not isComplexType(result_t) and all(w.imag == 0.0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + return w.astype(result_t, copy=False) + + def _commonType(*arrays): # in lite version, use higher precision (always double or cdouble) result_type = single @@ -226,22 +239,22 @@ def _to_native_byte_order(*arrays): def _assert_2d(*arrays): for a in arrays: if a.ndim != 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'two-dimensional' % a.ndim) + raise LinAlgError(f'{a.ndim}-dimensional array given. Array must be ' + 'two-dimensional') def _assert_stacked_2d(*arrays): for a in arrays: if a.ndim < 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'at least two-dimensional' % a.ndim) + raise LinAlgError(f'{a.ndim}-dimensional array given. Array must be ' + 'at least two-dimensional') def _assert_stacked_square(*arrays): for a in arrays: try: m, n = a.shape[-2:] except ValueError: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'at least two-dimensional' % a.ndim) + raise LinAlgError(f'{a.ndim}-dimensional array given. 
Array must be ' + 'at least two-dimensional') if m != n: raise LinAlgError('Last 2 dimensions of the array must be square') @@ -352,8 +365,7 @@ def tensorsolve(a, b, axes=None): a = a.reshape(prod, prod) b = b.ravel() res = wrap(solve(a, b)) - res.shape = oldshape - return res + return res.reshape(oldshape) def _solve_dispatcher(a, b): @@ -938,7 +950,7 @@ def outer(x1, x2, /): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.linalg.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1230,11 +1242,11 @@ def eigvals(a): >>> D = np.diag((-1,1)) >>> LA.eigvals(D) - array([-1., 1.]) + array([-1. + 0.j, 1. + 0.j]) >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) - array([ 1., -1.]) # random + array([ 1., -1.]) # random """ a, wrap = _makearray(a) @@ -1248,14 +1260,7 @@ def eigvals(a): under='ignore'): w = _umath_linalg.eigvals(a, signature=signature) - if not isComplexType(t): - if all(w.imag == 0): - w = w.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - return w.astype(result_t, copy=False) + return w.astype(_complexType(result_t), copy=False) def _eigvalsh_dispatcher(a, UPLO=None): @@ -1451,8 +1456,8 @@ def eig(a): >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) >>> eigenvalues - array([1., 2., 3.]) - >>> eigenvectors + array([1. + 0j, 2. + 0j, 3. + 0j]) + >>> eigenvectors.real array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) @@ -1484,8 +1489,8 @@ def eig(a): >>> # Theor. 
eigenvalues are 1 +/- 1e-9 >>> eigenvalues, eigenvectors = LA.eig(a) >>> eigenvalues - array([1., 1.]) - >>> eigenvectors + array([1.+0j, 1.+0j]) + >>> eigenvectors.real array([[1., 0.], [0., 1.]]) @@ -1501,15 +1506,9 @@ def eig(a): under='ignore'): w, vt = _umath_linalg.eig(a, signature=signature) - if not isComplexType(t) and all(w.imag == 0.0): - w = w.real - vt = vt.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - vt = vt.astype(result_t, copy=False) - return EigResult(w.astype(result_t, copy=False), wrap(vt)) + w = w.astype(_complexType(result_t), copy=False) + vt = vt.astype(_complexType(result_t), copy=False) + return EigResult(w, wrap(vt)) @array_function_dispatch(_eigvalsh_dispatcher) @@ -1764,7 +1763,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): ((9, 9), (6,), (6, 6)) >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) True - >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat = np.zeros((9, 6), dtype=np.complex128) >>> smat[:6, :6] = np.diag(S) >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) True @@ -2130,6 +2129,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): A = asarray(A) if A.ndim < 2: return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) if tol is None: @@ -2137,7 +2137,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): rtol = max(A.shape[-2:]) * finfo(S.dtype).eps else: rtol = asarray(rtol)[..., newaxis] - tol = S.max(axis=-1, keepdims=True) * rtol + tol = S.max(axis=-1, keepdims=True, initial=0) * rtol else: tol = asarray(tol)[..., newaxis] @@ -3305,14 +3305,6 @@ def cross(x1, x2, /, *, axis=-1): """ x1 = asanyarray(x1) x2 = asanyarray(x2) - - if x1.shape[axis] != 3 or x2.shape[axis] != 3: - raise ValueError( - "Both input arrays must be (arrays of) 3-dimensional vectors, " - f"but they are {x1.shape[axis]} and {x2.shape[axis]} " - "dimensional instead." 
- ) - return _core_cross(x1, x2, axis=axis) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 60320b021c71..8eb3e57cf1e2 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,45 +1,36 @@ -from collections.abc import Iterable +from collections.abc import Iterable, Sequence from typing import ( Any, + Generic, Literal as L, NamedTuple, Never, + Protocol, SupportsIndex, - SupportsInt, - TypeAlias, - TypeVar, overload, + type_check_only, ) +from typing_extensions import TypeVar import numpy as np -from numpy import ( - complex128, - complexfloating, - float64, - floating, - int32, - object_, - signedinteger, - timedelta64, - unsignedinteger, - vecdot, -) +from numpy import vecdot from numpy._core.fromnumeric import matrix_transpose -from numpy._globals import _NoValueType +from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, - _ArrayLikeUInt_co, + _DTypeLike, _NestedSequence, + _Shape, _ShapeLike, ) from numpy.linalg import LinAlgError @@ -79,470 +70,1155 @@ __all__ = [ "vecdot", ] -_NumberT = TypeVar("_NumberT", bound=np.number) -_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_) +type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast1D = tuple[int, *tuple[int, ...]] +type _AtLeast2D = tuple[int, int, *tuple[int, ...]] +type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] +type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] +type _JustAnyShape = tuple[Never, ...] 
# workaround for microsoft/pyright#10232 -_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] +type _tuple2[T] = tuple[T, T] +type _Ax2 = SupportsIndex | _tuple2[SupportsIndex] -### +type _inexact32 = np.float32 | np.complex64 +type _inexact80 = np.longdouble | np.clongdouble +type _to_integer = np.integer | np.bool +type _to_timedelta64 = np.timedelta64 | _to_integer +type _to_float64 = np.float64 | _to_integer +type _to_inexact64 = np.complex128 | _to_float64 +type _to_inexact64_unsafe = _to_inexact64 | np.datetime64 | np.timedelta64 | np.character +type _to_complex = np.number | np.bool +type _to_float64_co = np.float64 | np.float32 | np.float16 | _to_integer +type _to_complex128_co = np.complex128 | np.complex64 | _to_float64_co -fortran_int = np.intc +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] -class EigResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +type _Sequence2D[T] = Sequence[Sequence[T]] +type _Sequence3D[T] = Sequence[_Sequence2D[T]] +type _Sequence2ND[T] = _NestedSequence[Sequence[T]] +type _Sequence3ND[T] = _NestedSequence[_Sequence2D[T]] +type _Sequence4ND[T] = _NestedSequence[_Sequence3D[T]] +type _Sequence0D1D[T] = T | Sequence[T] +type _Sequence1D2D[T] = Sequence[T] | _Sequence2D[T] -class EighResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] | Sequence[ScalarT] # ==1d +type _ArrayLike2D[ScalarT: np.generic] = _SupportsArray[tuple[int, int], np.dtype[ScalarT]] | _Sequence2D[ScalarT] # ==2d +type _ArrayLike1D2D[ScalarT: np.generic] = ( # 1d or 2d + _SupportsArray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] | _Sequence1D2D[ScalarT] +) +type _ArrayLike3D[ScalarT: np.generic] = 
_SupportsArray[tuple[int, int, int], np.dtype[ScalarT]] | _Sequence3D[ScalarT] # ==3d +type _ArrayLike1ND[ScalarT: np.generic] = _SupportsArray[_AtLeast1D, np.dtype[ScalarT]] | _NestedSequence[ScalarT] # >=1d +type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _Sequence2ND[ScalarT] # >=2d +type _ArrayLike3ND[ScalarT: np.generic] = _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] | _Sequence3ND[ScalarT] # >=3d +type _ArrayLike4ND[ScalarT: np.generic] = _SupportsArray[_AtLeast4D, np.dtype[ScalarT]] | _Sequence4ND[ScalarT] # >=3d -class QRResult(NamedTuple): - Q: NDArray[Any] - R: NDArray[Any] +# safe-castable array-likes +type _ToArrayBool_1d = _ArrayLike1D[np.bool_] | Sequence[bool] +type _ToArrayBool_1nd = _ArrayLike1ND[np.bool_] | _NestedSequence[bool] +type _ToArrayBool_2nd = _ArrayLike2ND[np.bool_] | _Sequence2ND[bool] +type _ToArrayInt_1d = _ArrayLike1D[_to_integer] | Sequence[int] +type _ToArrayInt_1nd = _ArrayLike1ND[_to_integer] | _NestedSequence[int] +type _ToArrayInt_2nd = _ArrayLike2ND[_to_integer] | _Sequence2ND[int] +type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] +type _ToArrayF64_1d = _ArrayLike1D[_to_float64_co] | Sequence[float] +type _ToArrayF64_1nd = _ArrayLike1ND[_to_float64_co] | _NestedSequence[float] +type _ToArrayF64_2nd = _ArrayLike2ND[_to_float64_co] | _Sequence2ND[float] +type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] +type _ToArrayC128_1d = _ArrayLike1D[_to_complex128_co] | Sequence[complex] +type _ToArrayC128_1nd = _ArrayLike1ND[_to_complex128_co] | _NestedSequence[complex] +type _ToArrayC128_2nd = _ArrayLike2ND[_to_complex128_co] | _Sequence2ND[complex] +type _ToArrayComplex_1d = _ArrayLike1D[_to_complex] | Sequence[complex] +type _ToArrayComplex_2d = _ArrayLike2D[_to_complex] | _Sequence2D[complex] +type _ToArrayComplex_3d = _ArrayLike3D[_to_complex] | _Sequence3D[complex] +type _ToArrayComplex_1nd = _ArrayLike1ND[_to_complex] | _NestedSequence[complex] 
+type _ToArrayComplex_2nd = _ArrayLike2ND[_to_complex] | _Sequence2ND[complex] +# the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayI64 = _ArrayLike[np.int64] | list[int] | _NestedSequence[list[int]] +type _AsArrayI64_1d = _ArrayLike1D[np.int64] | list[int] +type _AsArrayI64_1nd = _ArrayLike1ND[np.int64] | list[int] | _NestedSequence[list[int]] +type _AsArrayI64_2nd = _ArrayLike2ND[np.int64] | _NestedSequence[list[int]] +type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayF64_1d = _ArrayLike1D[np.float64] | list[float] +type _AsArrayF64_1nd = _ArrayLike1ND[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayF64_2nd = _ArrayLike2ND[np.float64] | _NestedSequence[list[float]] +type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _AsArrayC128_1d = _ArrayLike1D[np.complex128] | list[complex] +type _AsArrayC128_2d = _ArrayLike2D[np.complex128] | Sequence[list[complex]] +type _AsArrayC128_1nd = _ArrayLike1ND[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _AsArrayC128_2nd = _ArrayLike2ND[np.complex128] | _NestedSequence[list[complex]] +type _AsArrayC128_3nd = _ArrayLike3ND[np.complex128] | _Sequence2ND[list[complex]] -class SlogdetResult(NamedTuple): - # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and - # a `(x.ndim - 2)`` dimensionl arrays otherwise - sign: Any - logabsdet: Any +type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` +type _SideKind = L["L", "U", "l", "u"] +type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] -class SVDResult(NamedTuple): - U: NDArray[Any] - S: NDArray[Any] - Vh: NDArray[Any] +type _LstSqResult[ShapeT: _Shape, InexactT: np.inexact, FloatingT: np.floating] = tuple[ + np.ndarray[ShapeT, np.dtype[InexactT]], # 
least-squares solution + _Array1D[FloatingT], # residuals + np.int32, # rank + _Array1D[FloatingT], # singular values +] -@overload -def tensorsolve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: Iterable[int] | None = None, -) -> NDArray[float64]: ... -@overload -def tensorsolve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: Iterable[int] | None = None, -) -> NDArray[floating]: ... -@overload -def tensorsolve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: Iterable[int] | None = None, -) -> NDArray[complexfloating]: ... +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) +_FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) +_InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) +_InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) -@overload -def solve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, -) -> NDArray[float64]: ... -@overload -def solve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... -@overload -def solve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +# shape-typed variant of numpy._typing._SupportsArray +@type_check_only +class _SupportsArray[ShapeT: _Shape, DTypeT: np.dtype](Protocol): + def __array__(self, /) -> np.ndarray[ShapeT, DTypeT]: ... -@overload -def tensorinv( - a: _ArrayLikeInt_co, - ind: int = 2, -) -> NDArray[float64]: ... -@overload -def tensorinv( - a: _ArrayLikeFloat_co, - ind: int = 2, -) -> NDArray[floating]: ... -@overload -def tensorinv( - a: _ArrayLikeComplex_co, - ind: int = 2, -) -> NDArray[complexfloating]: ... +### -@overload -def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... -@overload -def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... 
-@overload -def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +fortran_int = np.intc -# TODO: The supported input and output dtypes are dependent on the value of `n`. -# For example: `n < 0` always casts integer types to float64 -def matrix_power( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - n: SupportsIndex, -) -> NDArray[Any]: ... +# NOTE: These named tuple types are only generic when `typing.TYPE_CHECKING` -@overload -def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... -@overload -def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... -@overload -def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... +class EigResult(NamedTuple, Generic[_InexactT_co]): + eigenvalues: NDArray[_InexactT_co] + eigenvectors: NDArray[_InexactT_co] -@overload -def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... -@overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... -@overload -def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... -@overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... -@overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... -@overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... -@overload -def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... -@overload -def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... -@overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... -@overload -def outer( - x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - /, -) -> NDArray[Any]: ... 
+class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + eigenvalues: NDArray[_FloatingT_co] + eigenvectors: NDArray[_InexactT_co] -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... +class QRResult(NamedTuple, Generic[_InexactT_co]): + Q: NDArray[_InexactT_co] + R: NDArray[_InexactT_co] -@overload -def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... -@overload -def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... -@overload -def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + U: NDArray[_InexactT_co] + S: NDArray[_FloatingT_co] + Vh: NDArray[_InexactT_co] -@overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... -@overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ... +class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_co]): + sign: _FloatingOrArrayT_co + logabsdet: _InexactOrArrayT_co -@overload -def eig(a: _ArrayLikeInt_co) -> EigResult: ... -@overload -def eig(a: _ArrayLikeFloat_co) -> EigResult: ... -@overload -def eig(a: _ArrayLikeComplex_co) -> EigResult: ... +# keep in sync with `solve` +@overload # ~float64, +float64 +def tensorsolve(a: _ToArrayF64, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def tensorsolve(a: _ArrayLikeFloat_co, b: _ToArrayF64, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def tensorsolve( + a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None +) -> NDArray[np.float32]: ... 
+@overload # +float, +float +def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def tensorsolve(a: _AsArrayC128, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def tensorsolve(a: _ArrayLikeComplex_co, b: _AsArrayC128, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def tensorsolve( + a: _ArrayLike[np.complex64], b: _ArrayLike[_inexact32], axes: Iterable[int] | None = None +) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def tensorsolve( + a: _ArrayLike[_inexact32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None +) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def tensorsolve( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None +) -> NDArray[np.complex128 | Any]: ... -@overload -def eigh( - a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... -@overload -def eigh( - a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... -@overload -def eigh( - a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +# keep in sync with `tensorsolve` +@overload # ~float64, +float64 +def solve(a: _ToArrayF64, b: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def solve(a: _ArrayLikeFloat_co, b: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def solve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32]) -> NDArray[np.float32]: ... +@overload # +float, +float +def solve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def solve(a: _AsArrayC128, b: _ArrayLikeComplex_co) -> NDArray[np.complex128]: ... 
+@overload # +complex128, ~complex128 +def solve(a: _ArrayLikeComplex_co, b: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[_inexact32]) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def solve(a: _ArrayLike[_inexact32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complex128 | Any]: ... -@overload -def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = True, - compute_uv: L[True] = True, +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 +def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... +@overload # ~complex128 +def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... +@overload # fallback +def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def inv[ScalarT: _inexact32](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 +def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~complex128 +def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # fallback +def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def pinv[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, -) -> SVDResult: ... -@overload -def svd( - a: _ArrayLikeFloat_co, - full_matrices: bool = True, - compute_uv: L[True] = True, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[ScalarT]: ... 
+@overload # +float64 +def pinv( + a: _ToArrayF64, + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, -) -> SVDResult: ... -@overload -def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = True, - compute_uv: L[True] = True, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.float64]: ... +@overload # ~complex128 +def pinv( + a: _AsArrayC128, + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, -) -> SVDResult: ... -@overload -def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = True, *, - compute_uv: L[False], + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.complex128]: ... +@overload # fallback +def pinv( + a: _ArrayLikeComplex_co, + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, -) -> NDArray[float64]: ... -@overload + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[Any]: ... + +# keep in sync with the inverse functions +@overload # inexact32 +def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... +@overload # +float64 +def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... +@overload # ~complex128 +def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... +@overload # fallback +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... + +# NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. +# If you have a use case for it, please open an issue. +@overload # +int, n â‰Ĩ 0 +def matrix_power(a: _NestedSequence[int], n: _NonNegInt) -> NDArray[np.int_]: ... +@overload # +integer | ~object, n â‰Ĩ 0 +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _NonNegInt) -> NDArray[ScalarT]: ... +@overload # +float64, n < 0 +def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... 
+@overload # ~float64 +def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... +@overload # ~complex128 +def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... +@overload # ~inexact32 +def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # fallback +def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... + +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... +@overload # ~complex128 +def eig(a: _AsArrayC128) -> EigResult[np.complex128]: ... +@overload # +float64 +def eig(a: _ToArrayF64) -> EigResult[np.complex128]: ... +@overload # ~complex64 +def eig(a: _ArrayLike[np.complex64]) -> EigResult[np.complex64]: ... +@overload # ~float32 +def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64]: ... +@overload # fallback +def eig(a: _ArrayLikeComplex_co) -> EigResult: ... + +# +@overload # workaround for microsoft/pyright#10232 +def eigh(a: NDArray[Never], UPLO: _SideKind = "L") -> EighResult: ... +@overload # ~inexact32 +def eigh[ScalarT: _inexact32](a: _ArrayLike[ScalarT], UPLO: _SideKind = "L") -> EighResult[np.float32, ScalarT]: ... +@overload # +float64 +def eigh(a: _ToArrayF64, UPLO: _SideKind = "L") -> EighResult[np.float64, np.float64]: ... +@overload # ~complex128 +def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.complex128]: ... +@overload # fallback +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... + +# +@overload # ~inexact32, reduced|complete +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... 
+@overload # ~inexact32, r +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... +@overload # ~inexact32, raw +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... +@overload # +float64, raw +def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... +@overload # ~complex128, raw +def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... +@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... +@overload # fallback, raw +def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... + +# +@overload # workaround for microsoft/pyright#10232, compute_uv=True (default) +def svd(a: NDArray[Never], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False) -> SVDResult: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (positional) +def svd(a: NDArray[Never], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (keyword) +def svd(a: NDArray[Never], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... 
+@overload # ~inexact32, compute_uv=True (default) +def svd[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float32, ScalarT]: ... +@overload # ~inexact32, compute_uv=False (positional) +def svd(a: _ArrayLike[_inexact32], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, compute_uv=False (keyword) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[float64]: ... -@overload + a: _ArrayLike[_inexact32], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False +) -> NDArray[np.float32]: ... +@overload # +float64, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = True, - *, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[floating]: ... -@overload + a: _ToArrayF64, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.float64]: ... +@overload # ~complex128, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[floating]: ... + a: _AsArrayC128, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.complex128]: ... +@overload # +float64 | ~complex128, compute_uv=False (positional) +def svd(a: _ToArrayC128, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # +float64 | ~complex128, compute_uv=False (keyword) +def svd(a: _ToArrayC128, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... 
+@overload # fallback, compute_uv=True (default) +def svd( + a: _ArrayLikeComplex_co, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult: ... +@overload # fallback, compute_uv=False (positional) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # fallback, compute_uv=False (keyword) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... -# the ignored `overload-overlap` mypy error below is a false-positive -@overload -def svdvals( # type: ignore[overload-overlap] - x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / -) -> NDArray[np.float64]: ... -@overload -def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... -@overload -def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... +@overload # ~complex128 +def eigvals(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # +float64 +def eigvals(a: _ToArrayF64) -> NDArray[np.complex128] | NDArray[np.float64]: ... +@overload # ~complex64 +def eigvals(a: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # ~float32 +def eigvals(a: _ArrayLike[np.float32]) -> NDArray[np.complex64] | NDArray[np.float32]: ... +@overload # fallback +def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensionl array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... 
+# keep in sync with svdvals +@overload # abstract `inexact` (excluding concrete types) +def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... +@overload # ~inexact32 +def eigvalsh(a: _ArrayLike[_inexact32], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +@overload # +complex128 +def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... +@overload # fallback +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.floating]: ... -# TODO: Returns `int` for <2D arrays and `intp` otherwise +# keep in sync with eigvalsh +@overload # abstract `inexact` (excluding concrete types) +def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... +@overload # ~inexact32 +def svdvals(a: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... +@overload # +complex128 +def svdvals(a: _ToArrayC128, /) -> NDArray[np.float64]: ... +@overload # fallback +def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... + +# +@overload # workaround for microsoft/pyright#10232 def matrix_rank( - A: _ArrayLikeComplex_co, + A: np.ndarray[_JustAnyShape, np.dtype[_to_complex]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... - -@overload -def pinv( - a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co | None = None, +@overload # <2d +def matrix_rank( + A: _SupportsArray[_AtMost1D, np.dtype[_to_complex]] | Sequence[complex | _to_complex] | complex | _to_complex, + tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[float64]: ... -@overload -def pinv( - a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co | None = None, + rtol: _ArrayLikeFloat_co | None = None, +) -> L[0, 1]: ... 
+@overload # =2d +def matrix_rank( + A: _ToArrayComplex_2d, + tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[floating]: ... -@overload -def pinv( - a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co | None = None, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.int_: ... +@overload # =3d +def matrix_rank( + A: _ToArrayComplex_3d, + tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[complexfloating]: ... + rtol: _ArrayLikeFloat_co | None = None, +) -> _Array1D[np.int_]: ... +@overload # â‰Ĩ4d +def matrix_rank( + A: _ArrayLike4ND[_to_complex] | _Sequence4ND[complex], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.int_]: ... +@overload # ?d +def matrix_rank( + A: _ArrayLikeComplex_co, + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> Any: ... + +# +@overload # workaround for microsoft/pyright#10232 +def cond(x: np.ndarray[_JustAnyShape, np.dtype[_to_complex]], p: _OrderKind | None = None) -> Any: ... +@overload # 2d ~inexact32 +def cond(x: _ArrayLike2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... +@overload # 2d +inexact64 +def cond(x: _ArrayLike2D[_to_inexact64] | _Sequence2D[complex], p: _OrderKind | None = None) -> np.float64: ... +@overload # 2d ~number +def cond(x: _ArrayLike2D[_to_complex], p: _OrderKind | None = None) -> np.floating: ... +@overload # >2d ~inexact32 +def cond(x: _ArrayLike3ND[_inexact32], p: _OrderKind | None = None) -> NDArray[np.float32]: ... +@overload # >2d +inexact64 +def cond(x: _ArrayLike3ND[_to_inexact64] | _Sequence3ND[complex], p: _OrderKind | None = None) -> NDArray[np.float64]: ... 
+@overload # >2d ~number +def cond(x: _ArrayLike3ND[_to_complex], p: _OrderKind | None = None) -> NDArray[np.floating]: ... +@overload # fallback +def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# keep in sync with `det` +@overload # workaround for microsoft/pyright#10232 +def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[_to_complex]]) -> SlogdetResult: ... +@overload # 2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _ArrayLike2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ... +@overload # >2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _ArrayLike3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... +@overload # 2d +float64 +def slogdet(a: _ArrayLike2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... +@overload # >2d +float64 +def slogdet(a: _ArrayLike3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... +@overload # 2d ~complex128 +def slogdet(a: _AsArrayC128_2d) -> SlogdetResult[np.float64, np.complex128]: ... +@overload # >2d ~complex128 +def slogdet(a: _AsArrayC128_3nd) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... +@overload # fallback def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# keep in sync with `slogdet` +@overload # workaround for microsoft/pyright#10232 +def det(a: np.ndarray[_JustAnyShape, np.dtype[_to_complex]]) -> Any: ... +@overload # 2d ~inexact32 +def det[ScalarT: _inexact32](a: _ArrayLike2D[ScalarT]) -> ScalarT: ... +@overload # >2d ~inexact32 +def det[ScalarT: _inexact32](a: _ArrayLike3ND[ScalarT]) -> NDArray[ScalarT]: ... +@overload # 2d +float64 +def det(a: _ArrayLike2D[_to_float64]) -> np.float64: ... 
+@overload # >2d +float64 +def det(a: _ArrayLike3ND[_to_float64]) -> NDArray[np.float64]: ... +@overload # 2d ~complex128 +def det(a: _AsArrayC128_2d) -> np.complex128: ... +@overload # >2d ~complex128 +def det(a: _AsArrayC128_3nd) -> NDArray[np.complex128]: ... +@overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... -@overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ - NDArray[float64], - NDArray[float64], - int32, - NDArray[float64], -]: ... -@overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ - NDArray[floating], - NDArray[floating], - int32, - NDArray[floating], -]: ... -@overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ - NDArray[complexfloating], - NDArray[floating], - int32, - NDArray[floating], -]: ... +# +@overload # +float64, ~float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[_to_float64] | _Sequence2D[float], + b: _SupportsArray[ShapeT, np.dtype[np.floating | _to_integer]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # ~float64, +float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[np.floating | _to_integer] | _Sequence2D[float], + b: _SupportsArray[ShapeT, np.dtype[_to_float64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # +complex128, ~complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ToArrayComplex_2d, b: _SupportsArray[ShapeT, np.dtype[np.complex128]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... 
+@overload # ~complex128, +complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _AsArrayC128_2d, b: _SupportsArray[ShapeT, np.dtype[_to_complex]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~float32, ~float32, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[np.float32], b: _SupportsArray[ShapeT, np.dtype[np.float32]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.float32, np.float32]: ... +@overload # +complex64, ~complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[_inexact32], b: _SupportsArray[ShapeT, np.dtype[np.complex64]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # ~complex64, +complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[np.complex64], b: _SupportsArray[ShapeT, np.dtype[_inexact32]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # +float64, +float64, unknown shape +def lstsq( + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None +) -> _LstSqResult[_AnyShape, np.float64 | Any, np.float64 | Any]: ... +@overload # +complex128, +complex128, unknown shape +def lstsq( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None +) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... -@overload +# NOTE: This assumes that `axis` is only passed if `x` is >1d, and that `keepdims` is never passed positionally. +# keep in sync with `vector_norm` +@overload # +inexact64 (unsafe casting), axis=None, keepdims=False def norm( - x: ArrayLike, - ord: float | L["fro", "nuc"] | None = None, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False, -) -> floating: ... -@overload +) -> np.float64: ... 
+@overload # +inexact64 (unsafe casting), axis= (positional), keepdims=False def norm( - x: ArrayLike, - ord: float | L["fro", "nuc"] | None, - axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, - keepdims: bool = False, -) -> Any: ... -@overload + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None, + axis: _Ax2, + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), axis= (keyword), keepdims=False def norm( - x: ArrayLike, - ord: float | L["fro", "nuc"] | None = None, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, - keepdims: bool = False, -) -> Any: ... + axis: _Ax2, + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + ord: _OrderKind | None = None, + axis: _Ax2 | None = None, + *, + keepdims: L[True], +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), shape unknown, keepdims=True +def norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, + axis: _Ax2 | None = None, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # ~float16, axis=None, keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.float16: ... +@overload # ~float16, axis= (positional), keepdims=False +def norm(x: _ArrayLike[np.float16], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.float16]: ... +@overload # ~float16, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float16]: ... 
+@overload # ~float16, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, shape unknown, keepdims=True +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.float16]: ... +@overload # ~inexact32, axis=None, keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.float32: ... +@overload # ~inexact32, axis= (positional), keepdims=False +def norm(x: _ArrayLike[_inexact32], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, shape unknown, keepdims=True +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.float32]: ... +@overload # ~inexact80, axis=None, keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.longdouble: ... +@overload # ~inexact80, axis= (positional), keepdims=False +def norm(x: _ArrayLike[_inexact80], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.longdouble]: ... 
+@overload # ~inexact80, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, shape unknown, keepdims=True +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.longdouble]: ... +@overload # fallback +def norm(x: ArrayLike, ord: _OrderKind | None = None, axis: _Ax2 | None = None, keepdims: bool = False) -> Any: ... -@overload +# +@overload # +inexact64 (unsafe casting), ?d, keepdims=False def matrix_norm( - x: ArrayLike, + x: _SupportsArray[_JustAnyShape, np.dtype[_to_inexact64_unsafe]], /, *, - ord: float | L["fro", "nuc"] | None = "fro", + ord: _OrderKind | None = "fro", keepdims: L[False] = False, -) -> floating: ... -@overload +) -> NDArray[np.float64] | Any: ... +@overload # +inexact64 (unsafe casting), 2d, keepdims=False def matrix_norm( - x: ArrayLike, + x: _ArrayLike2D[_to_inexact64_unsafe] | _Sequence2D[complex], /, *, - ord: float | L["fro", "nuc"] | None = "fro", - keepdims: bool = False, -) -> Any: ... + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_to_inexact64_unsafe] | _Sequence3D[complex], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... 
+@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[True], +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_to_inexact64_unsafe] | _Sequence2ND[complex], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> NDArray[np.float64]: ... +@overload # ~float16, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[np.float16]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float16] | Any: ... +@overload # ~float16, 2d, keepdims=False +def matrix_norm(x: _ArrayLike2D[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False) -> np.float16: ... +@overload # ~float16, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float16]: ... +@overload # ~float16, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, ?d, keepdims=True +def matrix_norm(x: _ArrayLike2ND[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[True]) -> NDArray[np.float16]: ... +@overload # ~inexact32, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float32] | Any: ... +@overload # ~inexact32, 2d, keepdims=False +def matrix_norm(x: _ArrayLike2D[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False) -> np.float32: ... 
+@overload # ~inexact32, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, ?d, keepdims=True +def matrix_norm(x: _ArrayLike2ND[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[True]) -> NDArray[np.float32]: ... +@overload # ~inexact80, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.longdouble] | Any: ... +@overload # ~inexact80, 2d, keepdims=False +def matrix_norm( + x: _ArrayLike2D[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> np.longdouble: ... +@overload # ~inexact80, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> NDArray[np.longdouble]: ... +@overload # fallback +def matrix_norm(x: ArrayLike, /, *, ord: _OrderKind | None = "fro", keepdims: bool = False) -> Any: ... 
-@overload +# keep in sync with `norm` +@overload # +inexact64 (unsafe casting), axis=None, keepdims=False def vector_norm( - x: ArrayLike, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], /, *, + keepdims: L[False] = False, axis: None = None, ord: float | None = 2, +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), axis=, keepdims=False +def vector_norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + /, + *, + axis: _Ax2, keepdims: L[False] = False, -) -> floating: ... -@overload + ord: float | None = 2, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + /, + *, + axis: _Ax2 | None = None, + keepdims: L[True], + ord: float | None = 2, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), shape unknown, keepdims=True def vector_norm( - x: ArrayLike, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], /, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...], + axis: _Ax2 | None = None, + keepdims: L[True], ord: float | None = 2, - keepdims: bool = False, -) -> Any: ... +) -> NDArray[np.float64]: ... +@overload # ~float16, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.float16: ... +@overload # ~float16, axis= keepdims=False +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.float16]: ... +@overload # ~float16, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... 
+@overload # ~float16, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.float16]: ... +@overload # ~inexact32, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.float32: ... +@overload # ~inexact32, axis= keepdims=False +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.float32]: ... +@overload # ~inexact80, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.longdouble: ... +@overload # ~inexact80, axis=, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.longdouble]: ... 
+@overload # fallback +def vector_norm(x: ArrayLike, /, *, axis: _Ax2 | None = None, keepdims: bool = False, ord: float | None = 2) -> Any: ... # keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) @overload -def tensordot( - a: _ArrayLike[_NumericScalarT], - b: _ArrayLike[_NumericScalarT], - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[_NumericScalarT]: ... +def tensordot[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT], /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[ScalarT]: ... @overload def tensordot( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[np.bool_]: ... + a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.int_ | Any]: ... @overload def tensordot( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.float64 | Any]: ... @overload def tensordot( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.complex128 | Any]: ... 
-# TODO: Returns a scalar or array -def multi_dot( - arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], - *, - out: NDArray[Any] | None = None, +# +@overload +def multi_dot[ArrayT: np.ndarray]( + arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, out: ArrayT, +) -> ArrayT: ... +@overload +def multi_dot[ + AnyScalarT: ( + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, np.complex64, np.complex128, np.clongdouble, + np.object_, np.timedelta64, + ), +](arrays: Sequence[_ArrayLike[AnyScalarT]], *, out: None = None) -> NDArray[AnyScalarT]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeBool_co], *, out: None = None) -> NDArray[np.bool]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeInt_co], *, out: None = None) -> NDArray[np.int64 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeFloat_co], *, out: None = None) -> NDArray[np.float64 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeComplex_co], *, out: None = None) -> NDArray[np.complex128 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeTD64_co], *, out: None = None) -> NDArray[np.timedelta64 | Any]: ... +@overload +def multi_dot[ScalarT: np.number | np.object_ | np.timedelta64]( + arrays: Sequence[_ArrayLike[ScalarT]], *, out: None = None +) -> NDArray[ScalarT]: ... + +# +@overload # workaround for microsoft/pyright#10232 +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[_JustAnyShape, DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 2d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int], DTypeT]: ... 
+@overload # 3d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 4d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # nd like ~bool +def diagonal(x: _NestedSequence[list[bool]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bool]: ... +@overload # nd like ~int +def diagonal(x: _NestedSequence[list[int]], /, *, offset: SupportsIndex = 0) -> NDArray[np.int_]: ... +@overload # nd like ~float +def diagonal(x: _NestedSequence[list[float]], /, *, offset: SupportsIndex = 0) -> NDArray[np.float64]: ... +@overload # nd like ~complex +def diagonal(x: _NestedSequence[list[complex]], /, *, offset: SupportsIndex = 0) -> NDArray[np.complex128]: ... +@overload # nd like ~bytes +def diagonal(x: _NestedSequence[list[bytes]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bytes_]: ... +@overload # nd like ~str +def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> NDArray[np.str_]: ... +@overload # fallback +def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... + +# +@overload # workaround for microsoft/pyright#10232 +def trace( + x: _SupportsArray[_JustAnyShape, np.dtype[_to_complex]], /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None ) -> Any: ... +@overload # 2d known dtype, dtype=None +def trace[ScalarT: _to_complex](x: _ArrayLike2D[ScalarT], /, *, offset: SupportsIndex = 0, dtype: None = None) -> ScalarT: ... +@overload # 2d, dtype= +def trace[ScalarT: _to_complex]( + x: _ToArrayComplex_2d, /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> ScalarT: ... +@overload # 2d bool +def trace(x: _Sequence2D[bool], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... 
+@overload # 2d int +def trace(x: Sequence[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.int_: ... +@overload # 2d float +def trace(x: Sequence[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.float64: ... +@overload # 2d complex +def trace(x: Sequence[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.complex128: ... +@overload # 3d known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, dtype= +def trace[ScalarT: _to_complex]( + x: _ToArrayComplex_3d, /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> _Array1D[ScalarT]: ... +@overload # 3d+ known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[_AtLeast3D, DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int, *tuple[Any, ...]], DTypeT]: ... +@overload # 3d+, dtype= +def trace[ScalarT: _to_complex]( + x: _ArrayLike3ND[_to_complex] | _Sequence3ND[complex], /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[ScalarT]]: ... +@overload # 3d+ bool +def trace(x: _Sequence3ND[bool], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.bool]: ... +@overload # 3d+ int +def trace(x: _Sequence2ND[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.int_]: ... +@overload # 3d+ float +def trace(x: _Sequence2ND[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.float64]: ... +@overload # 3d+ complex +def trace(x: _Sequence2ND[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.complex128]: ... +@overload # fallback +def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... 
+ +# +@overload # workaround for microsoft/pyright#10232 +def outer(x1: NDArray[Never], x2: NDArray[Never], /) -> _Array2D[Any]: ... +@overload # +bool, +bool +def outer(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> _Array2D[np.bool]: ... +@overload # ~int64, +int64 +def outer(x1: _AsArrayI64_1d, x2: _ToArrayInt_1d, /) -> _Array2D[np.int64]: ... +@overload # +int64, ~int64 +def outer(x1: _ToArrayInt_1d, x2: _AsArrayI64_1d, /) -> _Array2D[np.int64]: ... +@overload # ~timedelta64, +timedelta64 +def outer(x1: _ArrayLike1D[np.timedelta64], x2: _ArrayLike1D[_to_timedelta64], /) -> _Array2D[np.timedelta64]: ... +@overload # +timedelta64, ~timedelta64 +def outer(x1: _ArrayLike1D[_to_timedelta64], x2: _ArrayLike1D[np.timedelta64], /) -> _Array2D[np.timedelta64]: ... +@overload # ~float64, +float64 +def outer(x1: _AsArrayF64_1d, x2: _ToArrayF64_1d, /) -> _Array2D[np.float64]: ... +@overload # +float64, ~float64 +def outer(x1: _ToArrayF64_1d, x2: _AsArrayF64_1d, /) -> _Array2D[np.float64]: ... +@overload # ~complex128, +complex128 +def outer(x1: _AsArrayC128_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[np.complex128]: ... +@overload # +complex128, ~complex128 +def outer(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> _Array2D[np.complex128]: ... +@overload # ~ScalarT, ~ScalarT +def outer[ScalarT: np.number | np.object_](x1: _ArrayLike1D[ScalarT], x2: _ArrayLike1D[ScalarT], /) -> _Array2D[ScalarT]: ... +@overload # fallback +def outer(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[Any]: ... 
+ +# note that this doesn't include bool, int_, float64, and complex128, as those require special-casing overloads +_AnyScalarT = TypeVar( + "_AnyScalarT", + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.uint64, + np.float16, np.float32, np.longdouble, np.complex64, np.clongdouble, +) # fmt: skip + +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication -def diagonal( - x: ArrayLike, # >= 2D array +# +@overload # ~T, ~T (we use constraints instead of a `: np.number` bound to prevent joins/unions) +def cross( # noqa: UP047 + x1: _ArrayLike1D2D[_AnyScalarT], + x2: _ArrayLike1D2D[_AnyScalarT], /, *, - offset: SupportsIndex = 0, -) -> NDArray[Any]: ... - -def trace( - x: ArrayLike, # >= 2D array + axis: SupportsIndex = -1, +) -> NDArray[_AnyScalarT]: ... # fmt: skip +@overload # ~int64, +int64 +def cross( + x1: _ArrayLike1D2D[np.int64] | _Sequence1D2D[int], + x2: _ArrayLike1D2D[np.integer] | _Sequence1D2D[int], /, *, - offset: SupportsIndex = 0, - dtype: DTypeLike | None = None, -) -> Any: ... - -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.int64]: ... +@overload # +int64, ~int64 def cross( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, + x1: _ArrayLike1D2D[np.integer], + x2: _ArrayLike1D2D[np.int64], /, *, - axis: int = -1, -) -> NDArray[unsignedinteger]: ... -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.int64]: ... +@overload # ~float64, +float64 def cross( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, + x1: _ArrayLike1D2D[np.float64] | _Sequence0D1D[list[float]], + x2: _ArrayLike1D2D[np.floating | np.integer] | _Sequence1D2D[float], /, *, - axis: int = -1, -) -> NDArray[signedinteger]: ... -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.float64]: ... 
+@overload # +float64, ~float64 def cross( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, + x1: _ArrayLike1D2D[np.floating | np.integer] | _Sequence1D2D[float], + x2: _ArrayLike1D2D[np.float64] | _Sequence0D1D[list[float]], /, *, - axis: int = -1, -) -> NDArray[floating]: ... -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.float64]: ... +@overload # ~complex128, +complex128 def cross( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, + x1: _ArrayLike1D2D[np.complex128] | _Sequence0D1D[list[complex]], + x2: _ArrayLike1D2D[np.number] | _Sequence1D2D[complex], /, *, - axis: int = -1, -) -> NDArray[complexfloating]: ... + axis: SupportsIndex = -1, +) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def cross( + x1: _ArrayLike1D2D[np.number] | _Sequence1D2D[complex], + x2: _ArrayLike1D2D[np.complex128] | _Sequence0D1D[list[complex]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128]: ... +@overload # ~object_, +object_ +def cross( + x1: _SupportsArray[tuple[int] | tuple[int, int], np.dtype[np.object_]], + x2: _ArrayLike1D2D[np.number | np.object_] | _Sequence1D2D[complex], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.object_]: ... +@overload # +object_, ~object_ +def cross( + x1: _ArrayLike1D2D[np.number | np.object_] | _Sequence1D2D[complex], + x2: _SupportsArray[tuple[int] | tuple[int, int], np.dtype[np.object_]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.object_]: ... +@overload # fallback +def cross[ScalarT: np.number]( + x1: _ArrayLike1D2D[ScalarT], + x2: _ArrayLike1D2D[ScalarT], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT]: ... -@overload -def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... -@overload -def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... -@overload -def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
-@overload -def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... -@overload -def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +# These overloads can be grouped into three parts: +# - 16 overloads as workaround for microsoft/pyright#10232 +# - 9 overloads for the scalar cases (both args 1d) +# - 18 overloads for the non-scalar cases (at least one arg >1d) +@overload # ?d ~T, 1d ~T +def matmul( # noqa: UP047 + x1: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], x2: _ArrayLike1D[_AnyScalarT], / +) -> NDArray[_AnyScalarT] | Any: ... +@overload # 1d ~T, ?d ~T +def matmul( # noqa: UP047 + x1: _ArrayLike1D[_AnyScalarT], x2: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], / +) -> NDArray[_AnyScalarT] | Any: ... +@overload # ?d bool, 1d bool +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.bool]], x2: _ToArrayBool_1d, /) -> NDArray[np.bool] | Any: ... +@overload # 1d bool, ?d bool +def matmul(x1: _ToArrayBool_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.bool]], /) -> NDArray[np.bool] | Any: ... +@overload # ?d ~int, 1d +int +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.int64]], x2: _ToArrayInt_1d, /) -> NDArray[np.int64] | Any: ... +@overload # 1d +int, ?d ~int +def matmul(x1: _ToArrayInt_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.int64]], /) -> NDArray[np.int64] | Any: ... +@overload # ?d +int, 1d ~int +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[_to_integer]], x2: _AsArrayI64_1d, /) -> NDArray[np.int64] | Any: ... +@overload # 1d ~int, ?d +int +def matmul(x1: _AsArrayI64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_integer]], /) -> NDArray[np.int64] | Any: ... +@overload # ?d ~float64, 1d +float64 +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.float64]], x2: _ToArrayF64_1d, /) -> NDArray[np.float64] | Any: ... 
+@overload # 1d +float64, ?d ~float64 +def matmul(x1: _ToArrayF64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.float64]], /) -> NDArray[np.float64] | Any: ... +@overload # ?d +float64, 1d ~float64 +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[_to_float64]], x2: _AsArrayF64_1d, /) -> NDArray[np.float64] | Any: ... +@overload # 1d ~float64, ?d +float64 +def matmul(x1: _AsArrayF64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_float64]], /) -> NDArray[np.float64] | Any: ... +@overload # ?d ~complex128, 1d +complex128 +def matmul( + x1: _SupportsArray[_JustAnyShape, np.dtype[np.complex128]], x2: _ToArrayC128_1d, / +) -> NDArray[np.complex128] | Any: ... +@overload # 1d +complex128, ?d ~complex128 +def matmul( + x1: _ToArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.complex128]], / +) -> NDArray[np.complex128] | Any: ... +@overload # ?d +complex128, 1d ~complex128 +def matmul( + x1: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], x2: _AsArrayC128_1d, / +) -> NDArray[np.complex128] | Any: ... +@overload # 1d ~complex128, ?d +complex128 +def matmul( + x1: _AsArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], / +) -> NDArray[np.complex128] | Any: ... # end workaround +@overload # 1d ~T, 1d ~T +def matmul(x1: _ArrayLike1D[_AnyScalarT], x2: _ArrayLike1D[_AnyScalarT], /) -> _AnyScalarT: ... # noqa: UP047 +@overload # 1d +bool, 1d +bool +def matmul(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> np.bool: ... +@overload # 1d ~int, 1d +int +def matmul(x1: _AsArrayI64_1d, x2: _ToArrayInt_1d, /) -> np.int64: ... +@overload # 1d +int, 1d ~int +def matmul(x1: _ToArrayInt_1d, x2: _AsArrayI64_1d, /) -> np.int64: ... +@overload # 1d ~float64, 1d +float64 +def matmul(x1: _AsArrayF64_1d, x2: _ToArrayF64_1d, /) -> np.float64: ... +@overload # 1d +float64, 1d ~float64 +def matmul(x1: _ToArrayF64_1d, x2: _AsArrayF64_1d, /) -> np.float64: ... 
+@overload # 1d ~complex128, 1d +complex128 +def matmul(x1: _AsArrayC128_1d, x2: _ToArrayComplex_1d, /) -> np.complex128: ... +@overload # 1d +complex128, 1d ~complex128 +def matmul(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> np.complex128: ... +@overload # 1d fallback, 1d fallback +def matmul(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> Any: ... # end 1d x 1d +@overload # >=1d ~T, >=2d ~T +def matmul(x1: _ArrayLike1ND[_AnyScalarT], x2: _ArrayLike2ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... # noqa: UP047 +@overload # >=2d ~T, >=1d ~T +def matmul(x1: _ArrayLike2ND[_AnyScalarT], x2: _ArrayLike1ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... # noqa: UP047 +@overload # >=1d +bool, >=2d +bool +def matmul(x1: _ToArrayBool_1nd, x2: _ToArrayBool_2nd, /) -> NDArray[np.bool]: ... +@overload # >=2d +bool, >=1d +bool +def matmul(x1: _ToArrayBool_2nd, x2: _ToArrayBool_1nd, /) -> NDArray[np.bool]: ... +@overload # >=1d ~int, >=2d +int +def matmul(x1: _AsArrayI64_1nd, x2: _ToArrayInt_2nd, /) -> NDArray[np.int64]: ... +@overload # >=2d ~int, >=1d +int +def matmul(x1: _AsArrayI64_2nd, x2: _ToArrayInt_1nd, /) -> NDArray[np.int64]: ... +@overload # >=1d +int, >=2d ~int +def matmul(x1: _ToArrayInt_1nd, x2: _AsArrayI64_2nd, /) -> NDArray[np.int64]: ... +@overload # >=2d +int, >=1d ~int +def matmul(x1: _ToArrayInt_2nd, x2: _AsArrayI64_1nd, /) -> NDArray[np.int64]: ... +@overload # >=1d ~float64, >=2d +float64 +def matmul(x1: _AsArrayF64_1nd, x2: _ToArrayF64_2nd, /) -> NDArray[np.float64]: ... +@overload # >=2d ~float64, >=1d +float64 +def matmul(x1: _AsArrayF64_2nd, x2: _ToArrayF64_1nd, /) -> NDArray[np.float64]: ... +@overload # >=1d +float64, >=2d ~float64 +def matmul(x1: _ToArrayF64_1nd, x2: _AsArrayF64_2nd, /) -> NDArray[np.float64]: ... +@overload # >=2d +float64, >=1d ~float64 +def matmul(x1: _ToArrayF64_2nd, x2: _AsArrayF64_1nd, /) -> NDArray[np.float64]: ... 
+@overload # >=1d ~complex128, >=2d +complex128 +def matmul(x1: _AsArrayC128_1nd, x2: _ToArrayC128_2nd, /) -> NDArray[np.complex128]: ... +@overload # >=2d ~complex128, >=1d +complex128 +def matmul(x1: _AsArrayC128_2nd, x2: _ToArrayC128_1nd, /) -> NDArray[np.complex128]: ... +@overload # >=1d +complex128, >=2d ~complex128 +def matmul(x1: _ToArrayC128_1nd, x2: _AsArrayC128_2nd, /) -> NDArray[np.complex128]: ... +@overload # >=2d +complex128, >=1d ~complex128 +def matmul(x1: _ToArrayC128_2nd, x2: _AsArrayC128_1nd, /) -> NDArray[np.complex128]: ... +@overload # >=1d fallback, >=2d fallback +def matmul(x1: _ToArrayComplex_1nd, x2: _ToArrayComplex_2nd, /) -> NDArray[Any]: ... +@overload # >=2d fallback, >=1d fallback +def matmul(x1: _ToArrayComplex_2nd, x2: _ToArrayComplex_1nd, /) -> NDArray[Any]: ... diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py index 22eb666ef26f..6cf26f2adf2d 100644 --- a/numpy/linalg/lapack_lite/fortran.py +++ b/numpy/linalg/lapack_lite/fortran.py @@ -104,8 +104,8 @@ def fortranSourceLines(fo): break yield numberingiter.lineno, ''.join(lines) else: - raise ValueError("jammed: continuation line not expected: %s:%d" % - (fo.name, numberingiter.lineno)) + raise ValueError("jammed: continuation line not expected: " + f"{fo.name}:{numberingiter.lineno}") def getDependencies(filename): """For a Fortran source file, return a list of routines declared as EXTERNAL diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index b3744024fd88..88096edb2ed7 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -329,7 +329,7 @@ def _stride_comb_iter(x): xi[...] 
= x xi = xi.view(x.__class__) assert_(np.all(xi == x)) - yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + yield xi, "stride_" + "_".join(f"{j:+}" for j in repeats) # generate also zero strides if possible if x.ndim >= 1 and x.shape[-1] == 1: @@ -604,7 +604,7 @@ class TestEigvals(EigvalsCases): @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) @@ -614,7 +614,7 @@ class ArraySubclass(np.ndarray): pass a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res = linalg.eigvals(a) - assert_(res.dtype.type is np.float64) + assert_(res.dtype.type is np.complex128) assert_equal((0, 1), res.shape) # This is just for documentation, it might make sense to change: assert_(isinstance(res, np.ndarray)) @@ -643,8 +643,8 @@ class TestEig(EigCases): def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) w, v = np.linalg.eig(x) - assert_equal(w.dtype, dtype) - assert_equal(v.dtype, dtype) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) w, v = np.linalg.eig(x) @@ -657,8 +657,8 @@ class ArraySubclass(np.ndarray): pass a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res, res_v = linalg.eig(a) - assert_(res_v.dtype.type is np.float64) - assert_(res.dtype.type is np.float64) + assert_(res_v.dtype.type is np.complex128) + assert_(res.dtype.type is np.complex128) assert_equal(a.shape, res_v.shape) assert_equal((0, 1), res.shape) # This is just for documentation, it might make sense to change: @@ -1000,8 +1000,8 @@ def do(self, a, b, tags): np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) expect_resids = 
np.asarray(expect_resids) if np.asarray(b).ndim == 1: - expect_resids.shape = (1,) - assert_equal(residuals.shape, expect_resids.shape) + expect_resids = expect_resids.reshape((1,)) + assert_equal(residuals.shape, expect_resids.shape) else: expect_resids = np.array([]).view(type(x)) assert_almost_equal(residuals, expect_resids) @@ -1091,7 +1091,7 @@ def tz(M): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_one(self, dt): @@ -1102,7 +1102,7 @@ def tz(mat): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_two(self, dt): @@ -1114,7 +1114,7 @@ def tz(mat): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_minus_one(self, dt): @@ -2440,3 +2440,22 @@ def test_vector_norm_empty(): assert_equal(np.linalg.vector_norm(x, ord=1), 0) assert_equal(np.linalg.vector_norm(x, ord=2), 0) assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) + +def test_empty_matrix_rank(): + assert_equal(matrix_rank(np.zeros((0, 0))), 0) + assert_equal(matrix_rank(np.zeros((0, 5))), 0) + assert_equal(matrix_rank(np.zeros((5, 0))), 0) + + result = matrix_rank(np.zeros((0, 5, 5))) + assert_equal(result.shape, (0,)) + assert_equal(result.dtype, np.intp) + + result = matrix_rank(np.zeros((3, 0, 5))) + assert_equal(result, np.array([0, 0, 0])) + + result = matrix_rank(np.zeros((2, 5, 0))) + assert_equal(result, np.array([0, 0])) + + result = matrix_rank(np.zeros((2, 3, 0, 4))) + assert_equal(result.shape, (2, 3)) + assert_equal(result, np.zeros((2, 3), dtype=np.intp)) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 053e7130da63..12d10bc7eecc 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -7,6 +7,7 
@@ from numpy import arange, array, dot, float64, linalg, transpose from numpy.testing import ( assert_, + assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_array_less, @@ -180,3 +181,10 @@ def test_openblas_threading(self): if mismatches != 0: assert False, ("unexpected result from matmul, " "probably due to OpenBLAS threading issues") + + def test_norm_linux_arm(self): + # gh-30816 + a = np.arange(20000) / 50000 + b = a + 1j * np.roll(np.flip(a), 12345) + norm = np.linalg.norm(b) + assert_almost_equal(norm, 46.18628948075393) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e0ead317d9f6..560c7dcefdcf 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -20,6 +20,7 @@ """ import builtins +import datetime as dt import functools import inspect import operator @@ -35,7 +36,7 @@ amax, amin, angle, - array as narray, # noqa: F401 + array as narray, bool_, expand_dims, finfo, # noqa: F401 @@ -222,11 +223,22 @@ def _recursive_fill_value(dtype, f): # We wrap into `array` here, which ensures we use NumPy cast rules # for integer casts, this allows the use of 99999 as a fill value # for int8. - # TODO: This is probably a mess, but should best preserve behavior? 
- vals = tuple( - np.array(_recursive_fill_value(dtype[name], f)) - for name in dtype.names) - return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + vals = [] + for name in dtype.names: + field_dtype = dtype[name] + val = _recursive_fill_value(field_dtype, f) + if np.issubdtype(field_dtype, np.datetime64): + if isinstance(val, dt.date): + val = np.datetime64(val) + val = np.array(val) + elif isinstance(val, (int, np.integer)): + val = np.array(val).astype(field_dtype) + else: + val = np.array(val) + else: + val = np.array(val) + vals.append(val) + return np.array(tuple(vals), dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: subtype, shape = dtype.subdtype subval = _recursive_fill_value(subtype, f) @@ -710,12 +722,12 @@ def getdata(a, subok=True): Return the data of a masked array as an ndarray. Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, - else return `a` as a ndarray or subclass (depending on `subok`) if not. + else return `a` as an ndarray or subclass (depending on `subok`) if not. Parameters ---------- a : array_like - Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + Input ``MaskedArray``, alternatively an ndarray or a subclass thereof. subok : bool Whether to force the output to be a `pure` ndarray (False) or to return a subclass of ndarray if appropriate (True, default). 
@@ -1105,8 +1117,7 @@ def reduce(self, target, axis=0, dtype=None): if t.shape == (): t = t.reshape(1) if m is not nomask: - m = make_mask(m, copy=True) - m.shape = (1,) + m = make_mask(m, copy=True).reshape((1,)) if m is nomask: tr = self.f.reduce(t, axis) @@ -1978,7 +1989,7 @@ def masked_where(condition, a, copy=True): (cshape, ashape) = (cond.shape, a.shape) if cshape and cshape != ashape: raise IndexError("Inconsistent shape between the condition and the input" - " (got %s and %s)" % (cshape, ashape)) + f" (got {cshape} and {ashape})") if hasattr(a, '_mask'): cond = mask_or(cond, a._mask) cls = type(a) @@ -2277,7 +2288,7 @@ def masked_object(x, value, copy=True, shrink=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> food = np.array(['green_eggs', 'ham'], dtype=np.object_) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> eat @@ -2286,7 +2297,7 @@ def masked_object(x, value, copy=True, shrink=True): fill_value='green_eggs', dtype=object) >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=np.object_) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> eat masked_array(data=['cheese', 'ham', 'pineapple'], @@ -2403,7 +2414,7 @@ def masked_invalid(a, copy=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=float) + >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a @@ -2579,7 +2590,7 @@ def flatten_sequence(iterable): """ for elm in iter(iterable): - if hasattr(elm, '__iter__'): + if hasattr(elm, "__iter__") and not isinstance(elm, (str, bytes)): yield from flatten_sequence(elm) else: yield elm @@ -2597,7 +2608,7 @@ def flatten_sequence(iterable): if len(inishape) > 1: newshape = list(out.shape) newshape[0] = inishape - out.shape = 
tuple(flatten_sequence(newshape)) + out = out.reshape(tuple(flatten_sequence(newshape))) return out @@ -2713,8 +2724,7 @@ def __getitem__(self, indx): _mask = self.maskiter.__getitem__(indx) if isinstance(_mask, ndarray): # set shape to match that of data; this is needed for matrices - _mask.shape = result.shape - result._mask = _mask + result._mask = _mask.reshape(result.shape) elif isinstance(_mask, np.void): return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) elif _mask: # Just a scalar, masked @@ -2941,7 +2951,7 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, # the shapes were the same, so we can at least # avoid that path if data._mask.shape != data.shape: - data._mask.shape = data.shape + data._mask = data._mask.reshape(data.shape) else: # Case 2. : With a mask in input. # If mask is boolean, create an array of True or False @@ -3116,7 +3126,7 @@ def __array_finalize__(self, obj): # Finalize the mask if self._mask is not nomask: try: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) except ValueError: self._mask = nomask except (TypeError, AttributeError): @@ -3152,7 +3162,7 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): # Get the domain mask domain = ufunc_domain.get(func) if domain is not None: - # Take the domain, and make sure it's a ndarray + # Take the domain, and make sure it's an ndarray with np.errstate(divide='ignore', invalid='ignore'): # The result may be masked for two (unary) domains. # That can't really be right as some domains drop @@ -3478,6 +3488,15 @@ def __setitem__(self, indx, value): _mask[indx] = mindx return + def _set_dtype(self, dtype): + super()._set_dtype(dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + try: + self._mask = self._mask.reshape(self.shape) + except (AttributeError, TypeError): + pass + # Define so that we can overwrite the setter. 
@property def dtype(self): @@ -3485,15 +3504,13 @@ def dtype(self): @dtype.setter def dtype(self, dtype): - super(MaskedArray, type(self)).dtype.__set__(self, dtype) - if self._mask is not nomask: - self._mask = self._mask.view(make_mask_descr(dtype), ndarray) - # Try to reset the shape of the mask (if we don't have a void). - # This raises a ValueError if the dtype change won't work. - try: - self._mask.shape = self.shape - except (AttributeError, TypeError): - pass + # DEPRECATED 2026-02-06, NumPy 2.5 + warnings.warn( + "Setting the dtype on a MaskedArray has been deprecated in " + "NumPy 2.5.\nInstead of changing the dtype on an array x, " + "create a new array with x.view(new_dtype)", + DeprecationWarning, stacklevel=2) + self._set_dtype(dtype) @property def shape(self): @@ -3505,7 +3522,7 @@ def shape(self, shape): # Cannot use self._mask, since it may not (yet) exist when a # masked matrix sets the shape. if getmask(self) is not nomask: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) def __setmask__(self, mask, copy=False): """ @@ -3552,7 +3569,7 @@ def __setmask__(self, mask, copy=False): mask = mask.astype(mdtype) # Mask is a sequence else: - # Make sure the new mask is a ndarray with the proper dtype + # Make sure the new mask is an ndarray with the proper dtype try: copy = None if not copy else True mask = np.array(mask, copy=copy, dtype=mdtype) @@ -3574,7 +3591,7 @@ def __setmask__(self, mask, copy=False): current_mask.flat = mask # Reshape if needed if current_mask.shape: - current_mask.shape = self.shape + self._mask = current_mask.reshape(self.shape) return _set_mask = __setmask__ @@ -3597,7 +3614,7 @@ def mask(self, value): def recordmask(self): """ Get or set the mask of the array if it has no named fields. 
For - structured arrays, returns a ndarray of booleans where entries are + structured arrays, returns an ndarray of booleans where entries are ``True`` if **all** the fields are masked, ``False`` otherwise: >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], @@ -4779,8 +4796,9 @@ def reshape(self, *s, **kwargs): Notes ----- - The reshaping operation cannot guarantee that a copy will not be made, - to modify the shape in place, use ``a.shape = s`` + By default, the reshaping operation will make a copy if a view + with different strides is not possible. To ensure a view, + pass ``copy=False``. Examples -------- @@ -5710,7 +5728,7 @@ def argmin(self, axis=None, fill_value=None, out=None, *, -------- >>> import numpy as np >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) - >>> x.shape = (2,2) + >>> x = x.reshape((2,2)) >>> x masked_array( data=[[--, --], @@ -6345,7 +6363,7 @@ def tolist(self, fill_value=None): inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None - result.shape = inishape + result = result.reshape(inishape) return result.tolist() def tobytes(self, fill_value=None, order='C'): @@ -6530,11 +6548,11 @@ class mvoid(MaskedArray): Fake a 'void' object to use for masked array with structured dtypes. 
""" - def __new__(self, data, mask=nomask, dtype=None, fill_value=None, + def __new__(cls, data, mask=nomask, dtype=None, fill_value=None, hardmask=False, copy=False, subok=True): copy = None if not copy else True _data = np.array(data, copy=copy, subok=subok, dtype=dtype) - _data = _data.view(self) + _data = _data.view(cls) _data._hardmask = hardmask if mask is not nomask: if isinstance(mask, np.void): @@ -7930,7 +7948,7 @@ def where(condition, x=_NoValue, y=_NoValue): Returns ------- out : MaskedArray - An masked array with `masked` elements where the condition is masked, + A masked array with `masked` elements where the condition is masked, elements from `x` where `condition` is True, and elements from `y` elsewhere. @@ -8078,7 +8096,7 @@ def nmask(x): return d -def round_(a, decimals=0, out=None): +def round(a, decimals=0, out=None): """ Return a copy of a, rounded to 'decimals' places. @@ -8112,7 +8130,7 @@ def round_(a, decimals=0, out=None): masked_array(data=[11.2, -3.973, 0.801, --], mask=[False, False, False, True], fill_value=1e+20) - >>> ma.round_(masked_x) + >>> ma.round(masked_x) masked_array(data=[11.0, -4.0, 1.0, --], mask=[False, False, False, True], fill_value=1e+20) @@ -8120,7 +8138,7 @@ def round_(a, decimals=0, out=None): masked_array(data=[11.2, -4.0, 0.8, --], mask=[False, False, False, True], fill_value=1e+20) - >>> ma.round_(masked_x, decimals=-1) + >>> ma.round(masked_x, decimals=-1) masked_array(data=[10.0, -0.0, 0.0, --], mask=[False, False, False, True], fill_value=1e+20) @@ -8134,8 +8152,62 @@ def round_(a, decimals=0, out=None): return out -round = round_ +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + .. deprecated:: 2.5 + `numpy.ma.round_` is deprecated. Use `numpy.ma.round` instead. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. 
Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + warnings.warn( + "numpy.ma.round_ is deprecated. 
Use numpy.ma.round instead.", + DeprecationWarning, + stacklevel=2, + ) + return round(a, decimals, out) def _mask_propagate(a, axis): """ @@ -8247,9 +8319,9 @@ def inner(a, b): fa = filled(a, 0) fb = filled(b, 0) if fa.ndim == 0: - fa.shape = (1,) + fa = fa.reshape((1,)) if fb.ndim == 0: - fb.shape = (1,) + fb = fb.reshape((1,)) return np.inner(fa, fb).view(MaskedArray) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 62dc32c13d97..9f7d97c6374e 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2,8 +2,8 @@ import datetime as dt import types -from _typeshed import Incomplete -from collections.abc import Callable, Sequence +from _typeshed import Incomplete, SupportsLenAndGetItem +from collections.abc import Buffer, Callable, Iterator, Sequence from typing import ( Any, Concatenate, @@ -12,21 +12,22 @@ from typing import ( Literal, Never, NoReturn, + Protocol, Self, SupportsComplex, SupportsFloat, SupportsIndex, SupportsInt, - TypeAlias, Unpack, final, overload, + override, + type_check_only, ) -from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override +from typing_extensions import TypeIs, TypeVar, deprecated import numpy as np from numpy import ( - _AnyShapeT, _HasDType, _HasDTypeWithRealAndImag, _ModeKind, @@ -40,16 +41,11 @@ from numpy import ( amin, bool_, bytes_, - character, complex128, complexfloating, datetime64, dtype, - dtypes, expand_dims, - flexible, - float16, - float32, float64, floating, generic, @@ -65,9 +61,7 @@ from numpy import ( signedinteger, str_, timedelta64, - ufunc, unsignedinteger, - void, ) from numpy._core.fromnumeric import _UFuncKwargs # type-check only from numpy._globals import _NoValueType @@ -95,6 +89,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeUInt_co, _CharLike_co, + _DT64Codes, _DTypeLike, _DTypeLikeBool, _DTypeLikeVoid, @@ -291,59 +286,76 @@ __all__ = [ "zeros_like", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, 
default=_AnyShape) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) -_UFuncT_co = TypeVar( - "_UFuncT_co", - # the `| Callable` simplifies self-binding to the ufunc's callable signature - bound=np.ufunc | Callable[..., object], - default=np.ufunc, - covariant=True, -) -_Pss = ParamSpec("_Pss") -_T = TypeVar("_T") +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +# the additional `Callable[...]` bound simplifies self-binding to the ufunc's callable signature +_UFuncT_co = TypeVar("_UFuncT_co", bound=np.ufunc | Callable[..., object], default=np.ufunc, covariant=True) + +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) # fmt: skip -_Ignored: TypeAlias = object +type _RealNumber = np.floating | np.integer +type _InnerScalar = np.number | np.bool | np.timedelta64 + +type _Ignored = object # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` -_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] -_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] - -_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] -_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] -_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] -_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] -_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] -_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] -_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] - -_ArrayInt_co: TypeAlias = NDArray[integer | bool_] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] - -_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co -_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None -_ArangeScalar: TypeAlias = floating | integer | datetime64 | timedelta64 - -_NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` -_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] - -_FillValue: TypeAlias = complex | None # int | float | complex | None -_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue] -_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]] +type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _Masked1D[ScalarT: np.generic] = 
MaskedArray[tuple[int], np.dtype[ScalarT]] +type _Masked2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] +type _Masked3D[ScalarT: np.generic] = MaskedArray[tuple[int, int, int], np.dtype[ScalarT]] + +type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] +type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] +type _MaskedArrayFloat64_co = _MaskedArray[np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool] +type _MaskedArrayFloat_co = _MaskedArray[np.floating | np.integer | np.bool] +type _MaskedArrayComplex128_co = _MaskedArray[np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool] +type _MaskedArrayComplex_co = _MaskedArray[np.inexact | np.integer | np.bool] +type _MaskedArrayNumber_co = _MaskedArray[np.number | np.bool] +type _MaskedArrayTD64_co = _MaskedArray[np.timedelta64 | np.integer | np.bool] + +type _ArrayInt_co = NDArray[np.integer | np.bool] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +# Workaround for https://github.com/microsoft/pyright/issues/10232 +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] + +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | np.character | np.number | np.timedelta64 | np.bool | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | np.character | np.number | np.datetime64 | np.bool | None +type _ArangeScalar = _RealNumber | np.datetime64 | np.timedelta64 + 
+type _NoMaskType = np.bool_[Literal[False]] # type of `np.False_` +type _MaskArray[ShapeT: _Shape] = np.ndarray[ShapeT, np.dtype[np.bool]] + +type _FillValue = complex | None # int | float | complex | None +type _FillValueCallable = Callable[[np.dtype | ArrayLike], _FillValue] +type _DomainCallable = Callable[..., NDArray[np.bool]] + +type _PyArray[T] = list[T] | tuple[T, ...] +type _PyScalar = complex | bytes | str + +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[_Seq2D[T]] + +type _CorrelateMode = Literal["valid", "same", "full"] + +@type_check_only +class _HasShape[ShapeT_co: _Shape](Protocol): + @property + def shape(self, /) -> ShapeT_co: ... ### @@ -373,13 +385,13 @@ class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): # https://github.com/microsoft/pyright/issues/10849 # https://github.com/microsoft/pyright/issues/10899 # https://github.com/microsoft/pyright/issues/11049 - def __call__( - self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, Tss], T]], /, a: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # not generic at runtime class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): @@ -389,14 +401,14 @@ class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... # NOTE: See the comment in `_MaskedUnaryOperation.__call__` - def __call__( - self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], /, a: ArrayLike, b: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... 
# NOTE: We cannot meaningfully annotate the return (d)types of these methods until # the signatures of the corresponding `numpy.ufunc` methods are specified. @@ -420,14 +432,14 @@ class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): ) -> None: ... # NOTE: See the comment in `_MaskedUnaryOperation.__call__` - def __call__( - self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], /, a: ArrayLike, b: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # not generic at runtime class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): @@ -445,7 +457,7 @@ class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): # NOTE: This class is only used internally for `maximum` and `minimum`, so we are # able to annotate the `__call__` method specifically for those two functions. @overload - def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + def __call__[ScalarT: np.generic](self, /, a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT]: ... @overload def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... @@ -529,7 +541,7 @@ def maximum_fill_value(obj: object) -> Any: ... # @overload # returns `a.fill_value` if `a` is a `MaskedArray` -def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ... +def get_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT]) -> ScalarT: ... @overload # otherwise returns `default_fill_value(a)` def get_fill_value(a: object) -> Any: ... @@ -539,33 +551,36 @@ def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... 
# the return type depends on the *values* of `a` and `b` (which cannot be known # statically), which is why we need to return an awkward `_ | None` @overload -def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ... +def common_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT], b: MaskedArray) -> ScalarT | None: ... @overload def common_fill_value(a: object, b: object) -> Any: ... # keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` @overload -def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ... +def filled[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + fill_value: _ScalarLike_co | None = None, +) -> ndarray[ShapeT, DTypeT]: ... @overload -def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ... +def filled[ScalarT: np.generic](a: _ArrayLike[ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[ScalarT]: ... @overload def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... # keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` @overload -def fix_invalid( - a: np.ndarray[_ShapeT, _DTypeT], +def fix_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], mask: _ArrayLikeBool_co = nomask, copy: bool = True, fill_value: _ScalarLike_co | None = None, -) -> MaskedArray[_ShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def fix_invalid( - a: _ArrayLike[_ScalarT], +def fix_invalid[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], mask: _ArrayLikeBool_co = nomask, copy: bool = True, fill_value: _ScalarLike_co | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def fix_invalid( a: ArrayLike, @@ -579,9 +594,12 @@ def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... 
# @overload -def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... +def getdata[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + subok: bool = True, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ... +def getdata[ScalarT: np.generic](a: _ArrayLike[ScalarT], subok: bool = True) -> NDArray[ScalarT]: ... @overload def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... @@ -591,9 +609,9 @@ get_data = getdata @overload def getmask(a: _ScalarLike_co) -> _NoMaskType: ... @overload -def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ... +def getmask[ShapeT: _Shape](a: MaskedArray[ShapeT, Any]) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload -def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ... +def getmask(a: ArrayLike) -> _MaskArray[_AnyShape] | _NoMaskType: ... get_mask = getmask @@ -601,10 +619,10 @@ get_mask = getmask @overload def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... @overload -def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ... +def getmaskarray[ShapeT: _Shape](arr: np.ndarray[ShapeT, Any]) -> _MaskArray[ShapeT]: ... # It's sufficient for `m` to have dtype with type: `type[np.bool_]`, -# which isn't necessarily a ndarray. Please open an issue if this causes issues. +# which isn't necessarily an ndarray. Please open an issue if this causes issues. def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... # @@ -622,35 +640,35 @@ def make_mask( dtype: _DTypeLikeBool = ..., ) -> _NoMaskType: ... @overload # m: ndarray, shrink=True (default), dtype: bool-like (default) -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, shrink: Literal[True] = True, dtype: _DTypeLikeBool = ..., -) -> _MaskArray[_ShapeT] | _NoMaskType: ... 
+) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, *, shrink: Literal[False], dtype: _DTypeLikeBool = ..., -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... @overload # m: ndarray, dtype: void-like -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, shrink: bool = True, *, dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload # m: array-like, shrink=True (default), dtype: bool-like (default) def make_mask( m: ArrayLike, copy: bool = False, shrink: Literal[True] = True, dtype: _DTypeLikeBool = ..., -) -> _MaskArray | _NoMaskType: ... +) -> _MaskArray[_AnyShape] | _NoMaskType: ... @overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) def make_mask( m: ArrayLike, @@ -658,7 +676,7 @@ def make_mask( *, shrink: Literal[False], dtype: _DTypeLikeBool = ..., -) -> _MaskArray: ... +) -> _MaskArray[_AnyShape]: ... @overload # m: array-like, dtype: void-like def make_mask( m: ArrayLike, @@ -678,11 +696,11 @@ def make_mask( # @overload # known shape, dtype: unstructured (default) -def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ... +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[ShapeT]: ... @overload # known shape, dtype: structured -def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ... +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[ShapeT, dtype[np.void]]: ... 
@overload # unknown shape, dtype: unstructured (default) -def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ... +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray[_AnyShape]: ... @overload # unknown shape, dtype: structured def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... @@ -718,141 +736,206 @@ def mask_or( shrink: Literal[False], ) -> _MaskArray[tuple[()]]: ... @overload # ndarray, ndarray | nomask, shrink=True (default) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], copy: bool = False, shrink: Literal[True] = True, -) -> _MaskArray[_ShapeT] | _NoMaskType: ... +) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # ndarray, ndarray | nomask, shrink=False (kwarg) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], copy: bool = False, *, shrink: Literal[False], -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... @overload # ndarray | nomask, ndarray, shrink=True (default) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], copy: bool = False, shrink: Literal[True] = True, -) -> _MaskArray[_ShapeT] | _NoMaskType: ... 
+) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # ndarray | nomask, ndarray, shrink=False (kwarg) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], copy: bool = False, *, shrink: Literal[False], -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... # @overload -def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ... +def flatten_mask[ShapeT: _Shape](mask: np.ndarray[ShapeT]) -> _MaskArray[ShapeT]: ... @overload -def flatten_mask(mask: ArrayLike) -> _MaskArray: ... +def flatten_mask(mask: ArrayLike) -> _MaskArray[_AnyShape]: ... # NOTE: we currently don't know the field types of `void` dtypes, so it's not possible # to know the output dtype of the returned array. @overload -def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ... +def flatten_structured_array[ShapeT: _Shape](a: MaskedArray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT]: ... @overload -def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ... +def flatten_structured_array[ShapeT: _Shape](a: np.ndarray[ShapeT, np.dtype[np.void]]) -> np.ndarray[ShapeT]: ... @overload # for some reason this accepts unstructured array-likes, hence this fallback overload def flatten_structured_array(a: ArrayLike) -> np.ndarray: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... 
@overload # array-like of known scalar-type -def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_invalid[ScalarT: np.generic](a: _ArrayLike[ScalarT], copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # array-like of known scalar-type -def masked_where( - condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True -) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_where[ShapeT: _Shape, DTypeT: np.dtype]( + condition: _ArrayLikeBool_co, + a: ndarray[ShapeT, DTypeT], + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_where[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_greater[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_greater[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... 
@overload # unknown array-like def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_greater_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_greater_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_less[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_less[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... 
# keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_less_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_less_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_not_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_not_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... 
+def masked_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_equal[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_inside[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_inside[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_outside[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... 
@overload # array-like of known scalar-type -def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_outside[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # only intended for object arrays, so we assume that's how it's always used in practice @overload -def masked_object( - x: np.ndarray[_ShapeT, np.dtype[np.object_]], +def masked_object[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.object_]], value: object, copy: bool = True, shrink: bool = True, -) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ... +) -> MaskedArray[ShapeT, np.dtype[np.object_]]: ... @overload def masked_object( x: _ArrayLikeObject_co, @@ -863,23 +946,23 @@ def masked_object( # keep roughly in sync with `filled` @overload -def masked_values( - x: np.ndarray[_ShapeT, _DTypeT], +def masked_values[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], value: _ScalarLike_co, rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True -) -> MaskedArray[_ShapeT, _DTypeT]: ... + shrink: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def masked_values( - x: _ArrayLike[_ScalarT], +def masked_values[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], value: _ScalarLike_co, rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True -) -> _MaskedArray[_ScalarT]: ... + shrink: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload def masked_values( x: ArrayLike, @@ -887,7 +970,7 @@ def masked_values( rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True + shrink: bool = True, ) -> _MaskedArray[Incomplete]: ... # TODO: Support non-boolean mask dtypes, such as `np.void`. 
This will require adding an @@ -913,7 +996,7 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): # Similar to `ndarray.__setitem__` but without the `void` case. @overload # flexible | object_ | bool def __setitem__( - self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: MaskedIterator[Any, dtype[np.flexible | object_ | np.bool] | np.dtypes.StringDType], index: _ToIndices, value: object, /, @@ -957,15 +1040,15 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. - def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... + def __next__[ScalarT: np.generic](self: MaskedIterator[Any, np.dtype[ScalarT]]) -> ScalarT: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Final[Literal[15]] = 15 @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], mask: _ArrayLikeBool_co = nomask, dtype: None = None, copy: bool = False, @@ -976,13 +1059,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, data: object, mask: _ArrayLikeBool_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, subok: bool = True, ndmin: int = 0, @@ -991,14 +1074,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... 
@overload - def __new__( + def __new__[ScalarT: np.generic]( cls, data: object, mask: _ArrayLikeBool_co = nomask, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, subok: bool = True, ndmin: int = 0, @@ -1007,7 +1090,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload def __new__( cls, @@ -1024,44 +1107,56 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF | None = None, ) -> _MaskedArray[Any]: ... - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( self, - obj: ndarray[_ShapeT, _DTypeT], - context: tuple[ufunc, tuple[Any, ...], int] | None = None, + obj: ndarray[ShapeT, DTypeT], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, return_scalar: bool = False, - ) -> MaskedArray[_ShapeT, _DTypeT]: ... + ) -> MaskedArray[ShapeT, DTypeT]: ... @overload # type: ignore[override] # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... @overload # (dtype: DTypeT) - def view( + def view[DTypeT: np.dtype]( self, /, - dtype: _DTypeT | _HasDType[_DTypeT], + dtype: DTypeT | _HasDType[DTypeT], type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[ScalarT]) - def view( + def view[ScalarT: np.generic]( self, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... 
@overload # ([dtype: _, ]*, type: ArrayT) - def view( + def view[ArrayT: np.ndarray]( self, /, dtype: DTypeLike | None = None, *, - type: type[_ArrayT], - fill_value: _ScalarLike_co | None = None - ) -> _ArrayT: ... + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: _, type: ArrayT) - def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: ArrayT, /) - def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + def view[ArrayT: np.ndarray]( + self, + /, + dtype: type[ArrayT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: ?) def view( self, @@ -1070,8 +1165,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # overlaps with previous overloads. dtype: _VoidDTypeLike | str | None, type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, dtype]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype]: ... # Keep in sync with `ndarray.__getitem__` @overload @@ -1081,22 +1176,22 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @overload - def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + def __getitem__(self: _MaskedArray[np.void], indx: str, /) -> MaskedArray[_ShapeT_co]: ... @overload - def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... 
@property def shape(self) -> _ShapeT_co: ... @shape.setter # type: ignore[override] - def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + def shape[ShapeT: _Shape](self: MaskedArray[ShapeT, Any], shape: ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... + def mask(self) -> np.ndarray[_ShapeT_co, np.dtype[MaskType]] | MaskType: ... @mask.setter def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... + def recordmask(self) -> np.ndarray[_ShapeT_co, np.dtype[MaskType]] | MaskType: ... @recordmask.setter def recordmask(self, mask: Never, /) -> NoReturn: ... def harden_mask(self) -> Self: ... @@ -1122,11 +1217,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def flat(self, value: ArrayLike, /) -> None: ... @property - def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... @fill_value.setter def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... - def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def get_fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -1134,33 +1229,33 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep roughly in sync with `ma.core.compress`, but swap the first two arguments @overload # type: ignore[override] - def compress( + def compress[ArrayT: np.ndarray]( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None, - out: _ArrayT - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... 
@overload - def compress( + def compress[ArrayT: np.ndarray]( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None = None, *, - out: _ArrayT - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload def compress( self, condition: _ArrayLikeBool_co, axis: None = None, - out: None = None + out: None = None, ) -> MaskedArray[tuple[int], _DTypeT_co]: ... @overload def compress( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None = None, - out: None = None + out: None = None, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? @@ -1175,13 +1270,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__add__` @overload # type: ignore[override] - def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload @@ -1191,15 +1290,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1212,10 +1311,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... 
@overload def __add__( - self: MaskedArray[Any, dtypes.StringDType], + self: MaskedArray[Any, np.dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... @overload def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload @@ -1223,13 +1322,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__radd__` @overload # type: ignore[override] # signature equivalent to __add__ - def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1239,15 +1342,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... 
@overload - def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1260,10 +1363,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... @overload def __radd__( - self: MaskedArray[Any, dtypes.StringDType], + self: MaskedArray[Any, np.dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... @overload def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... 
@overload @@ -1271,13 +1374,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__sub__` @overload # type: ignore[override] - def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __sub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1287,15 +1394,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
@overload - def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1309,13 +1416,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rsub__` @overload # type: ignore[override] - def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... 
@overload def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1325,15 +1436,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... 
@overload @@ -1347,13 +1458,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__mul__` @overload # type: ignore[override] - def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1363,13 +1478,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
# type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload @@ -1378,7 +1493,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __mul__( - self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + self: MaskedArray[Any, np.dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1389,13 +1504,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rmul__` @overload # type: ignore[override] # signature equivalent to __mul__ - def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... 
@overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1405,13 +1524,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... 
@overload def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload @@ -1420,7 +1539,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __rmul__( - self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + self: MaskedArray[Any, np.dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1493,21 +1612,29 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__floordiv__` @overload # type: ignore[override] - def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__[ScalarT: _RealNumber](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload @@ -1523,23 +1650,35 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rfloordiv__` @overload # type: ignore[override] - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: _ArrayLikeBool_co, + /, + ) -> _MaskedArray[ScalarT]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... @overload @@ -1551,13 +1690,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) @overload # type: ignore[override] - def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + def __pow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1567,11 +1710,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... 
@overload @@ -1583,13 +1726,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) @overload # type: ignore[override] - def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1599,11 +1746,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
# type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload @@ -1615,13 +1762,25 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + def get_imag[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... # @property # type: ignore[misc] - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def real[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + def get_real[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... 
# keep in sync with `np.ma.count` @overload @@ -1638,7 +1797,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... @overload # (empty_sequence) - def reshape( # type: ignore[overload-overlap] # mypy false positive + def reshape( self, shape: Sequence[Never], /, @@ -1647,14 +1806,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: bool | None = None, ) -> MaskedArray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d - def reshape( + def reshape[ShapeT: _Shape]( self, - shape: _AnyShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", copy: bool | None = None, - ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + ) -> MaskedArray[ShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -1752,20 +1911,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> bool_ | _MaskedArray[bool_]: ... @overload - def all( + def all[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def all( + def all[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ma.core.any` @overload # type: ignore[override] @@ -1798,20 +1957,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> bool_ | _MaskedArray[bool_]: ... @overload - def any( + def any[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def any( + def any[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... 
+ ) -> ArrayT: ... # Keep in sync with `ndarray.trace` and `ma.core.trace` @overload @@ -1824,30 +1983,30 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: None = None, ) -> Any: ... @overload - def trace( + def trace[ArrayT: np.ndarray]( self, # >= 2D MaskedArray offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def trace( + def trace[ArrayT: np.ndarray]( self, # >= 2D MaskedArray offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. @overload def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + def dot[ArrayT: np.ndarray](self, b: ArrayLike, out: ArrayT, strict: bool = False) -> ArrayT: ... # Keep in sync with `ma.core.sum` @overload # type: ignore[override] @@ -1860,32 +2019,24 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def sum( + def sum[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def sum( + def sum[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... - - # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` - @overload # out: None (default) - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... 
- @overload # out: ndarray - def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ma.core.prod` @overload # type: ignore[override] @@ -1898,34 +2049,206 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def prod[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def prod( + def prod[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... product = prod - # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` - @overload # out: None (default) - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + # Keep in sync with `ndarray.cumprod` + @override # type: ignore[override] + @overload + def cumprod[DTypeT: dtype[number | object_]]( + self: MaskedArray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> MaskedArray[tuple[int], DTypeT]: ... + @overload # bool_ + def cumprod( + self: _MaskedArray[np.bool], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> _Masked1D[np.int_]: ... + @overload # dtype: (keyword) + def cumprod[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... 
+ @overload # dtype: (keyword) + def cumprod( + self: _MaskedArray[number | bool_ | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # dtype: (positional) + def cumprod[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... + @overload # dtype: (positional) + def cumprod( + self: _MaskedArray[number | bool_ | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # axis: + def cumprod[ArrayT: _MaskedArray[number | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumprod[ShapeT: _Shape]( + self: MaskedArray[ShapeT, np.dtype[np.bool]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> MaskedArray[ShapeT, np.dtype[np.int_]]: ... + @overload # axis: , dtype: + def cumprod[ShapeT: _Shape, ScalarT: np.generic]( + self: MaskedArray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> MaskedArray[ShapeT, dtype[ScalarT]]: ... + @overload # axis: , dtype: + def cumprod[ShapeT: _Shape]( + self: MaskedArray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> MaskedArray[ShapeT]: ... + @overload # out: ndarray + def cumprod[ArrayT: ndarray]( + self: _MaskedArray[number | bool_ | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def cumprod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: _MaskedArray[number | bool_ | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... 
+ + # Keep in sync with `ndarray.cumsum` + @override # type: ignore[override] + @overload + def cumsum[DTypeT: dtype[number | timedelta64 | object_]]( + self: MaskedArray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> MaskedArray[tuple[int], DTypeT]: ... + @overload # bool_ + def cumsum( + self: _MaskedArray[np.bool], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> _Masked1D[np.int_]: ... + @overload # dtype: (keyword) + def cumsum[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... + @overload # dtype: (keyword) + def cumsum( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # dtype: (positional) + def cumsum[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... + @overload # dtype: (positional) + def cumsum( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # axis: + def cumsum[ArrayT: _MaskedArray[number | timedelta64 | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumsum[ShapeT: _Shape]( + self: MaskedArray[ShapeT, np.dtype[np.bool]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> MaskedArray[ShapeT, np.dtype[np.int_]]: ... + @overload # axis: , dtype: + def cumsum[ShapeT: _Shape, ScalarT: np.generic]( + self: MaskedArray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> MaskedArray[ShapeT, dtype[ScalarT]]: ... 
+ @overload # axis: , dtype: + def cumsum[ShapeT: _Shape]( + self: MaskedArray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> MaskedArray[ShapeT]: ... @overload # out: ndarray - def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray]( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... @overload - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... # Keep in sync with `ma.core.mean` @overload # type: ignore[override] @@ -1937,32 +2260,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def mean( + def mean[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def mean( + def mean[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep roughly in sync with `ma.core.anom` @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @overload - def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, np.dtype]: ... 
@overload - def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, np.dtype]: ... # keep in sync with `std` and `ma.core.var` @overload # type: ignore[override] @@ -1976,26 +2299,26 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Any: ... @overload - def var( + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def var( + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `var` and `ma.core.std` @overload # type: ignore[override] @@ -2009,34 +2332,34 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Any: ... @overload - def std( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def std( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ndarray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... 
@overload # out=ndarray - def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... def argsort( # type: ignore[override] self, @@ -2049,9 +2372,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): stable: bool = False, ) -> _MaskedArray[intp]: ... - # Keep in-sync with np.ma.argmin - @overload # type: ignore[override] - def argmin( + # keep in sync with `MaskedArray.argmin` (below) and `ndarray.argmax` + @override # type: ignore[override] + @overload + def argmax( self, axis: None = None, fill_value: _ScalarLike_co | None = None, @@ -2059,37 +2383,47 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): *, keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... - @overload - def argmin( + @overload # axis: + def argmax( + self, + axis: SupportsIndex, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _MaskedArray[intp]: ... + @overload # keepdims: True + def argmax( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., - ) -> Any: ... - @overload - def argmin( + keepdims: Literal[True], + ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmax[ArrayT: NDArray[intp]]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... - @overload - def argmin( + ) -> ArrayT: ... 
+ @overload # out: (positional) + def argmax[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... - # Keep in-sync with np.ma.argmax - @overload # type: ignore[override] - def argmax( + # keep in sync with `MaskedArray.argmax` (above) and `ndarray.argmin` + @override # type: ignore[override] + @overload + def argmin( self, axis: None = None, fill_value: _ScalarLike_co | None = None, @@ -2097,33 +2431,42 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): *, keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... - @overload - def argmax( + @overload # axis: + def argmin( + self, + axis: SupportsIndex, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _MaskedArray[intp]: ... + @overload # keepdims: True + def argmin( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., - ) -> Any: ... - @overload - def argmax( + keepdims: Literal[True], + ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmin[ArrayT: NDArray[intp]]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... - @overload - def argmax( + ) -> ArrayT: ... + @overload # out: (positional) + def argmin[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
# def sort( # type: ignore[override] @@ -2139,13 +2482,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @overload # type: ignore[override] - def min( - self: _MaskedArray[_ScalarT], + def min[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def min( self, @@ -2155,32 +2498,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload # type: ignore[override] - def max( - self: _MaskedArray[_ScalarT], + def max[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def max( self, @@ -2190,32 +2533,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
# @overload - def ptp( - self: _MaskedArray[_ScalarT], + def ptp[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] = False, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def ptp( self, @@ -2225,22 +2568,22 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool = False, ) -> Any: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload @@ -2262,66 +2605,106 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: str | Sequence[str] | None = None, ) -> None: ... - # - @overload + # keep in sync with ndarray.argpartition + @override + @overload # axis: None def argpartition( self, + kth: _ArrayLikeInt, /, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> MaskedArray[tuple[int], np.dtype[intp]]: ... + @overload # axis: index (default) + def argpartition( + self, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + /, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, - ) -> _MaskedArray[intp]: ... - @overload + ) -> MaskedArray[_ShapeT_co, np.dtype[intp]]: ... + @overload # void, axis: None def argpartition( self: _MaskedArray[np.void], + kth: _ArrayLikeInt, /, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> MaskedArray[tuple[int], np.dtype[intp]]: ... 
+ @overload # void, axis: index (default) + def argpartition( + self: _MaskedArray[np.void], kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + /, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> _MaskedArray[intp]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[intp]]: ... # Keep in-sync with np.ma.take @overload # type: ignore[override] - def take( # type: ignore[overload-overlap] - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, mode: _ModeKind = "raise" - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def take( - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `ndarray.diagonal` @override + @overload # ?d (workaround) + def diagonal[DTypeT: dtype]( + self: MaskedArray[tuple[Never, Never, Never, Never], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, DTypeT]: ... + @overload # 2d + def diagonal[DTypeT: dtype]( + self: MaskedArray[tuple[int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[tuple[int], DTypeT]: ... 
+ @overload # 3d + def diagonal[DTypeT: dtype]( + self: MaskedArray[tuple[int, int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[tuple[int, int], DTypeT]: ... + @overload # Nd (fallback) def diagonal( self, - /, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, @@ -2369,19 +2752,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep in sync with `ndarray.tolist` @override @overload - def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + def tolist[T](self: MaskedArray[tuple[Never], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... @overload - def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + def tolist[T](self: MaskedArray[tuple[()], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... @overload - def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... + def tolist[T](self: _Masked1D[np.generic[T]], /, fill_value: _ScalarLike_co | None = None) -> list[T]: ... @overload - def tolist( - self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None - ) -> list[list[_T]]: ... + def tolist[T]( + self: MaskedArray[tuple[int, int], np.dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[T]]: ... @overload - def tolist( - self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None - ) -> list[list[list[_T]]]: ... + def tolist[T]( + self: MaskedArray[tuple[int, int, int], np.dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[list[T]]]: ... @overload def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... 
@@ -2397,34 +2784,41 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def dtype(self) -> _DTypeT_co: ... @dtype.setter - def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + def dtype[DTypeT: np.dtype](self: MaskedArray[_AnyShape, DTypeT], dtype: DTypeT, /) -> None: ... class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( - self, # pyright: ignore[reportSelfClsParameterName] - data, - mask=..., - dtype=..., - fill_value=..., - hardmask=..., - copy=..., - subok=..., - ): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - def __iter__(self): ... - def __len__(self): ... - def filled(self, fill_value=None): ... - def tolist(self): ... # type: ignore[override] - -def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... -def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray -def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray - -# 0D float64 array -class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): - def __new__(cls) -> Self: ... - + cls, + /, + data: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + fill_value: _FillValue = None, + hardmask: bool = False, + copy: bool = False, + subok: bool = True, + ) -> Self: ... + @override + def __getitem__(self, indx: _ToIndices, /) -> Incomplete: ... # type: ignore[override] + @override + def __setitem__(self, indx: _ToIndices, value: ArrayLike, /) -> None: ... # type: ignore[override] + @override + def __iter__[ScalarT: np.generic](self: mvoid[Any, np.dtype[ScalarT]], /) -> Iterator[MaskedConstant | ScalarT]: ... + @override + def __len__(self, /) -> int: ... + @override + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> Self | np.void: ... # type: ignore[override] + @override # list or tuple + def tolist(self) -> Sequence[Incomplete]: ... # type: ignore[override] + +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... 
+def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): + def __new__(cls) -> Self: ... + # these overrides are no-ops @override def __iadd__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @@ -2446,12 +2840,12 @@ class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): masked: Final[MaskedConstant] = ... masked_singleton: Final[MaskedConstant] = ... -masked_array: TypeAlias = MaskedArray +type masked_array = MaskedArray # keep in sync with `MaskedArray.__new__` @overload -def array( - data: _ArrayLike[_ScalarT], +def array[ScalarT: np.generic]( + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = False, order: _OrderKACF | None = None, @@ -2462,11 +2856,11 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( data: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, order: _OrderKACF | None = None, mask: _ArrayLikeBool_co = nomask, @@ -2476,9 +2870,9 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( data: object, dtype: DTypeLike | None = None, copy: bool = False, @@ -2490,37 +2884,61 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... # keep in sync with `array` @overload -def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... 
@overload -def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... # keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... +def asanyarray[MArrayT: MaskedArray](a: MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... # def is_masked(x: object) -> bool: ... @overload -def min( - obj: _ArrayLike[_ScalarT], +def min[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... 
+) -> ScalarT: ... @overload def min( obj: ArrayLike, @@ -2530,31 +2948,31 @@ def min( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def min( +def min[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def min( +def min[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( - obj: _ArrayLike[_ScalarT], +def max[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def max( obj: ArrayLike, @@ -2564,31 +2982,31 @@ def max( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def max( +def max[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( +def max[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( - obj: _ArrayLike[_ScalarT], +def ptp[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def ptp( obj: ArrayLike, @@ -2598,22 +3016,22 @@ def ptp( keepdims: bool | _NoValueType = ... ) -> Any: ... 
@overload -def ptp( +def ptp[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # we cannot meaningfully annotate `frommethod` further, because the callable signature # of the return type fully depends on the *value* of `methodname` and `reversed` in @@ -2624,44 +3042,47 @@ def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: # since their use-cases are specific to masks, they only accept `MaskedArray` inputs. # keep in sync with `MaskedArray.harden_mask` -def harden_mask(a: _MArrayT) -> _MArrayT: ... +def harden_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.soften_mask` -def soften_mask(a: _MArrayT) -> _MArrayT: ... +def soften_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.shrink_mask` -def shrink_mask(a: _MArrayT) -> _MArrayT: ... +def shrink_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.ids` def ids(a: ArrayLike) -> tuple[int, int]: ... # keep in sync with `ndarray.nonzero` -def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... +def nonzero(a: ArrayLike) -> tuple[_Array1D[np.intp], ...]: ... # keep first overload in sync with `MaskedArray.ravel` @overload -def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +def ravel[DTypeT: np.dtype](a: np.ndarray[Any, DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], DTypeT]: ... 
@overload -def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Masked1D[ScalarT]: ... @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... # keep roughly in sync with `lib._function_base_impl.copy` @overload -def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +def copy[MArrayT: MaskedArray](a: MArrayT, order: _OrderKACF = "C") -> MArrayT: ... @overload -def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... +def copy[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + order: _OrderKACF = "C", +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _MaskedArray[ScalarT]: ... @overload def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... # keep in sync with `_core.fromnumeric.diagonal` @overload -def diagonal( - a: _ArrayLike[_ScalarT], +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, @@ -2672,32 +3093,49 @@ def diagonal( # keep in sync with `_core.fromnumeric.repeat` @overload -def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +def repeat[ScalarT: np.generic](a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[ScalarT]: ... @overload -def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... 
+def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... @overload -def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[Incomplete]: ... @overload def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... # keep in sync with `_core.fromnumeric.swapaxes` @overload -def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +def swapaxes[MArrayT: MaskedArray](a: MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> MArrayT: ... @overload -def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +def swapaxes[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... # NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need # additional overloads to cover the array-like input here. @overload # a: MaskedArray, dtype=None -def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... +def anom[MArrayT: MaskedArray](a: MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> MArrayT: ... @overload # a: array-like, dtype=None -def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, +) -> _MaskedArray[ScalarT]: ... 
@overload # a: unknown array-like, dtype: dtype-like (positional) -def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic](a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[ScalarT]) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, dtype: dtype-like (keyword) -def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, dtype: unknown dtype-like (positional) def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... @overload # a: unknown array-like, dtype: unknown dtype-like (keyword) @@ -2714,12 +3152,26 @@ def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... @overload def all( - a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> np.bool | _MaskedArray[np.bool]: ... @overload -def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... 
# Keep in sync with `all` and `MaskedArray.any` @overload @@ -2730,53 +3182,99 @@ def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... @overload def any( - a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> np.bool | _MaskedArray[np.bool]: ... @overload -def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... # NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, # which wouldn't work here for array-like inputs, so we need additional overloads. @overload -def compress( - condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, +) -> _Masked1D[ScalarT]: ... @overload -def compress( - condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None -) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, +) -> _MaskedArray[ScalarT]: ... 
@overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> _Masked1D[Incomplete]: ... @overload def compress( - condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... +def compress[ArrayT: np.ndarray](condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: ArrayT) -> ArrayT: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def compress[ArrayT: np.ndarray]( + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `cumprod` and `MaskedArray.cumsum` @overload # out: None (default) def cumsum( - a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) -def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +def cumsum[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload # out: ndarray (kwarg) -def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def cumsum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... 
# Keep in sync with `cumsum` and `MaskedArray.cumsum` @overload # out: None (default) def cumprod( - a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) -def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +def cumprod[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload # out: ndarray (kwarg) -def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def cumprod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` @overload @@ -2788,22 +3286,22 @@ def mean( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` @overload @@ -2815,22 +3313,22 @@ def sum( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `product` and `MaskedArray.prod` @overload @@ -2842,22 +3340,22 @@ def prod( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `prod` and `MaskedArray.prod` @overload @@ -2869,22 +3367,22 @@ def product( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def product( +def product[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def product( +def product[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` @overload @@ -2897,24 +3395,24 @@ def trace( out: None = None, ) -> Incomplete: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
@overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # keep in sync with `std` and `MaskedArray.var` @overload @@ -2928,26 +3426,26 @@ def std( mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `std` and `MaskedArray.var` @overload @@ -2961,26 +3459,26 @@ def var( mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # (a, b) minimum: _extrema_operation = ... @@ -3016,23 +3514,23 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> Any: ... 
@overload -def argmin( +def argmin[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmin( +def argmin[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `argmin` @overload @@ -3054,40 +3552,40 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmax( +def argmax[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmax( +def argmax[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = "raise" -) -> _ScalarT: ... + mode: _ModeKind = "raise", +) -> ScalarT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def take( a: ArrayLike, @@ -3105,28 +3603,93 @@ def take( mode: _ModeKind = "raise", ) -> _MaskedArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... + +# +def power(a: ArrayLike, b: ArrayLike, third: None = None) -> _MaskedArray[Incomplete]: ... + +# +@overload # axis: (deprecated) +@deprecated( + "In the future the default for argsort will be axis=-1, not the current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + category=MaskedArrayFutureWarning, + stacklevel=2, +) +def argsort( + a: ArrayLike, + axis: _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... +@overload # MaskedArray, axis: None +def argsort( + a: MaskedArray, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Masked1D[np.intp]: ... +@overload # MaskedArray, axis: int-like +def argsort( + a: MaskedArray, + axis: SupportsIndex, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _MaskedArray[np.intp]: ... +@overload # array-like, axis: None +def argsort( + a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... 
+@overload # array-like, axis: int-like +def argsort( + a: ArrayLike, + axis: SupportsIndex, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> NDArray[np.intp]: ... -def power(a, b, third=None): ... -def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... +# @overload -def sort( - a: _ArrayT, +def sort[ArrayT: np.ndarray]( + a: ArrayT, axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, @@ -3134,11 +3697,11 @@ def sort( fill_value: _ScalarLike_co | None = None, *, stable: Literal[False] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload def sort( a: ArrayLike, - axis: SupportsIndex = -1, + axis: SupportsIndex | None = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, endwith: bool | None = True, @@ -3146,45 +3709,499 @@ def sort( *, stable: Literal[False] | None = None, ) -> NDArray[Any]: ... + +# @overload -def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... -def concatenate(arrays, axis=0): ... -def diag(v, k=0): ... -def left_shift(a, n): ... -def right_shift(a, n): ... -def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... -def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def transpose(a, axes=None): ... -def reshape(a, new_shape, order="C"): ... -def resize(x, new_shape): ... + +# +@overload +def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MaskedArray[ScalarT]: ... 
+@overload +def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `diag` and `lib._twodim_base_impl.diag` +@overload +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _MaskedArray[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Masked1D[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Masked2D[ScalarT]: ... +@overload +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Masked1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Masked2D[Incomplete]: ... +@overload +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MaskedArray[ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = 0) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `right_shift` +@overload +def left_shift[ShapeT: _Shape, ScalarT: np.bool | np.integer | np.object_]( + a: ndarray[ShapeT, np.dtype[ScalarT]], n: int +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def left_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[ScalarT], n: int) -> _MaskedArray[ScalarT]: ... +@overload +def left_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `left_shift` +@overload +def right_shift[ShapeT: _Shape, ScalarT: np.bool | np.integer | np.object_]( + a: ndarray[ShapeT, np.dtype[ScalarT]], n: int +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def right_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[ScalarT], n: int) -> _MaskedArray[ScalarT]: ... +@overload +def right_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... 
+ +# keep in sync with `_core.fromnumeric.put` +def put(a: np.ndarray, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + +# +def putmask(a: np.ndarray, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... + +# keep in sync with `_core.fromnumeric.transpose` +@overload +def transpose[ArrayT: np.ndarray](a: ArrayT, axes: _ShapeLike | None = None) -> ArrayT: ... +@overload +def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> _MaskedArray[ScalarT]: ... +@overload # `_MaskedArray | np.ndarray` is equivalent to `np.ndarray` +def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> np.ndarray: ... + +# keep in sync with `_core.fromnumeric.reshape` +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], new_shape: SupportsIndex, order: _OrderACF = "C" +) -> _Masked1D[ScalarT]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], new_shape: ShapeT, order: _OrderACF = "C" +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], new_shape: Sequence[SupportsIndex], order: _OrderACF = "C" +) -> _MaskedArray[ScalarT]: ... +@overload # shape: index +def reshape(a: ArrayLike, new_shape: SupportsIndex, order: _OrderACF = "C") -> _Masked1D[Incomplete]: ... +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT, order: _OrderACF = "C") -> MaskedArray[ShapeT]: ... +@overload # shape: Sequence[index] +def reshape(a: ArrayLike, new_shape: Sequence[SupportsIndex], order: _OrderACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.resize` +@overload +def resize[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex] +) -> _Masked1D[ScalarT]: ... 
+@overload +def resize[ScalarT: np.generic, ShapeT: _Shape]( + x: _ArrayLike[ScalarT], new_shape: ShapeT +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def resize[ScalarT: np.generic](x: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> _MaskedArray[ScalarT]: ... +@overload +def resize(x: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Masked1D[Incomplete]: ... +@overload +def resize[ShapeT: _Shape](x: ArrayLike, new_shape: ShapeT) -> MaskedArray[ShapeT]: ... +@overload +def resize(x: ArrayLike, new_shape: _ShapeLike) -> _MaskedArray[Incomplete]: ... + +# def ndim(obj: ArrayLike) -> int: ... -def shape(obj): ... + +# keep in sync with `_core.fromnumeric.shape` +@overload # this prevents `Any` from being returned with Pyright +def shape(obj: _HasShape[Never]) -> _AnyShape: ... +@overload +def shape[ShapeT: _Shape](obj: _HasShape[ShapeT]) -> ShapeT: ... +@overload +def shape(obj: _PyScalar) -> tuple[()]: ... +@overload # `collections.abc.Sequence` can't be used because `bytes` and `str` are assignable to it +def shape(obj: _PyArray[_PyScalar]) -> tuple[int]: ... +@overload +def shape(obj: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... +@overload # requires PEP 688 support +def shape(obj: memoryview | bytearray) -> tuple[int]: ... +@overload +def shape(obj: ArrayLike) -> _AnyShape: ... + +# def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... -def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... -def where(condition, x=..., y=...): ... -def choose(indices, choices, out=None, mode="raise"): ... -def round_(a, decimals=0, out=None): ... -round = round_ -def inner(a, b): ... +# keep in sync with `lib._function_base_impl.diff` +@overload # known array-type +def diff[MArrayT: _MaskedArray[np.inexact | np.timedelta64 | np.object_]]( + a: MArrayT, + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> MArrayT: ... 
+@overload # known shape, datetime64 +def diff[ShapeT: _Shape]( + a: MaskedArray[ShapeT, np.dtype[np.datetime64]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> MaskedArray[ShapeT, np.dtype[np.timedelta64]]: ... +@overload # unknown shape, known scalar-type +def diff[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[ScalarT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[np.timedelta64]: ... +@overload # 1d int +def diff( + a: Sequence[int], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.int_]: ... +@overload # 2d int +def diff( + a: Sequence[Sequence[int]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.int_]: ... +@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.float64]: ... +@overload # 2d float +def diff( + a: Sequence[list[float]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.float64]: ... 
+@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.complex128]: ... +@overload # 2d complex +def diff( + a: Sequence[list[complex]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type +def diff( + a: ArrayLike, + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.multiarray.where` +@overload +def where(condition: ArrayLike, x: _NoValueType = ..., y: _NoValueType = ...) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.choose` +@overload +def choose( + indices: _IntLike_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def choose[ScalarT: np.generic]( + indices: _ArrayLikeInt_co, + choices: _ArrayLike[ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[ScalarT]: ... +@overload +def choose( + indices: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[Incomplete]: ... +@overload +def choose[ArrayT: np.ndarray]( + indices: _ArrayLikeInt_co, + choices: ArrayLike, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... + +# +@overload # a: masked_array, out: None (default) +def round[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... 
+@overload # a: known array-like, out: None (default) +def round[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, out: None (default) +def round(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def round[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (keyword) +def round[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... + +# +@overload # a: masked_array, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... +@overload # a: known array-like, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (keyword) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... + +# keep in sync with `_core.multiarray.inner` +@overload # (?d T, Nd T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayNoD[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT] | Any: ... 
+@overload # (Nd T, ?d T) -> 0d|Nd T (workaround)
+def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayLike[ScalarT], b: _ArrayNoD[ScalarT]) -> _MaskedArray[ScalarT] | Any: ...
+@overload # (1d T, 1d T) -> 0d T
+def inner[ScalarT: _InnerScalar](a: _ToArray1D[ScalarT], b: _ToArray1D[ScalarT]) -> ScalarT: ...
+@overload # (1d object_, 1d _) -> 0d object
+def inner(a: _Array1D[np.object_], b: _Array1D[np.object_] | _ToArray1D[_InnerScalar]) -> Any: ...
+@overload # (1d _, 1d object_) -> 0d object
+def inner(a: _ToArray1D[_InnerScalar], b: _Array1D[np.object_]) -> Any: ...
+@overload # (1d bool, 1d bool) -> bool_
+def inner(a: Sequence[bool], b: Sequence[bool]) -> np.bool: ...
+@overload # (1d ~int, 1d +int) -> int_
+def inner(a: list[int], b: Sequence[int]) -> np.int_: ...
+@overload # (1d +int, 1d ~int) -> int_
+def inner(a: Sequence[int], b: list[int]) -> np.int_: ...
+@overload # (1d ~float, 1d +float) -> float64
+def inner(a: list[float], b: Sequence[float]) -> np.float64: ...
+@overload # (1d +float, 1d ~float) -> float64
+def inner(a: Sequence[float], b: list[float]) -> np.float64: ...
+@overload # (1d ~complex, 1d +complex) -> complex128
+def inner(a: list[complex], b: Sequence[complex]) -> np.complex128: ...
+@overload # (1d +complex, 1d ~complex) -> complex128
+def inner(a: Sequence[complex], b: list[complex]) -> np.complex128: ...
+@overload # (1d T, 2d T) -> 1d T
+def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray1D[ScalarT], b: _Array2D[ScalarT]) -> _Masked1D[ScalarT]: ...
+@overload # (2d T, 1d T) -> 1d T
+def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array1D[ScalarT]) -> _Masked1D[ScalarT]: ...
+@overload # (2d T, 2d T) -> 2d T
+def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array2D[ScalarT]) -> _Masked2D[ScalarT]: ...
+@overload # fallback
+def inner(a: ArrayLike, b: ArrayLike) -> Any: ...
+
 innerproduct = inner
-def outer(a, b): ...
+# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# keep in sync with `_core.numeric.outer` +@overload +def outer(a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT]) -> _Masked2D[_AnyNumericScalarT]: ... # noqa: UP047 +@overload +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> _Masked2D[np.bool]: ... +@overload +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> _Masked2D[np.int_ | Any]: ... +@overload +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> _Masked2D[np.float64 | Any]: ... +@overload +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> _Masked2D[np.complex128 | Any]: ... +@overload +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co) -> _Masked2D[np.timedelta64 | Any]: ... + outerproduct = outer -def correlate(a, v, mode="valid", propagate_mask=True): ... -def convolve(a, v, mode="full", propagate_mask=True): ... +# keep in sync with `convolve` and `_core.numeric.correlate` +@overload +def correlate( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], + v: _ArrayLike[_AnyNumericScalarT], + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[_AnyNumericScalarT]: ... +@overload +def correlate( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.bool]: ... +@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.int_ | Any]: ... +@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.float64 | Any]: ... +@overload +def correlate( + a: _ArrayLikeNumber_co, + v: _ArrayLikeNumber_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.complex128 | Any]: ... 
+@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.timedelta64 | Any]: ... + +# keep in sync with `correlate` and `_core.numeric.convolve` +@overload +def convolve( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], + v: _ArrayLike[_AnyNumericScalarT], + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[_AnyNumericScalarT]: ... +@overload +def convolve( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.bool]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.int_ | Any]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.float64 | Any]: ... +@overload +def convolve( + a: _ArrayLikeNumber_co, + v: _ArrayLikeNumber_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.complex128 | Any]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.timedelta64 | Any]: ... +# def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... - def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def fromflex(fxarray): ... +# +def fromflex[ShapeT: _Shape](fxarray: np.ndarray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT, np.dtype[Incomplete]]: ... + +# keep in sync with `lib._function_base_impl.append` +@overload # known array type, axis specified +def append[MArrayT: MaskedArray]( + a: MArrayT, + b: MArrayT, + axis: SupportsIndex, +) -> MArrayT: ... 
+@overload # 1d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: Sequence[ScalarT], + b: Sequence[ScalarT], + axis: SupportsIndex, +) -> _Masked1D[ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _Seq2D[ScalarT], + b: _Seq2D[ScalarT], + axis: SupportsIndex, +) -> _Masked2D[ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _Seq3D[ScalarT], + b: _Seq3D[ScalarT], + axis: SupportsIndex, +) -> _Masked3D[ScalarT]: ... +@overload # ?d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _NestedSequence[ScalarT], + b: _NestedSequence[ScalarT], + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append( + a: np.ndarray | _NestedSequence[_ScalarLike_co], + b: _NestedSequence[_ScalarLike_co], + axis: SupportsIndex, +) -> _MaskedArray[Incomplete]: ... +@overload # known scalar type, axis=None +def append[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + b: _ArrayLike[ScalarT], + axis: None = None, +) -> _Masked1D[ScalarT]: ... +@overload # unknown scalar type, axis=None +def append( + a: ArrayLike, + b: ArrayLike, + axis: None = None, +) -> _Masked1D[Incomplete]: ... -def append(a, b, axis=None): ... -def dot(a, b, strict=False, out=None): ... +# keep in sync with `_core.multiarray.dot` +@overload +def dot(a: ArrayLike, b: ArrayLike, strict: bool = False, out: None = None) -> Incomplete: ... +@overload +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, strict: bool = False, *, out: OutT) -> OutT: ... 
# internal wrapper functions for the functions below def _convert2ma( @@ -3196,18 +4213,18 @@ def _convert2ma( # keep in sync with `_core.multiarray.arange` @overload # dtype= -def arange( +def arange[ScalarT: _ArangeScalar]( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, step: _ArangeScalar | float | None = 1, *, - dtype: _DTypeLike[_ArangeScalarT], + dtype: _DTypeLike[ScalarT], device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _Masked1D[_ArangeScalarT]: ... +) -> _Masked1D[ScalarT]: ... @overload # (int-like, int-like?, int-like?) def arange( start_or_stop: _IntLike_co, @@ -3286,11 +4303,23 @@ def arange( fill_value: _FillValue | None = None, hardmask: bool = False, ) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... @overload # dtype= def arange( - start_or_stop: _ArangeScalar | float, + start_or_stop: _ArangeScalar | float | str, /, - stop: _ArangeScalar | float | None = None, + stop: _ArangeScalar | float | str | None = None, step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, @@ -3302,8 +4331,8 @@ def arange( # based on `_core.fromnumeric.clip` @overload -def clip( - a: _ScalarT, +def clip[ScalarT: np.generic]( + a: ScalarT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -3314,10 +4343,10 @@ def clip( hardmask: bool = False, dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload -def clip( - a: NDArray[_ScalarT], +def clip[ScalarT: np.generic]( + a: NDArray[ScalarT], a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -3328,13 +4357,13 @@ def clip( hardmask: bool = False, dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def clip( +def clip[MArrayT: MaskedArray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _MArrayT, + out: MArrayT, *, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., @@ -3342,21 +4371,21 @@ def clip( hardmask: bool = False, dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def clip( +def clip[MArrayT: MaskedArray]( a: ArrayLike, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., *, - out: _MArrayT, + out: MArrayT, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., fill_value: _FillValue | None = None, hardmask: bool = False, dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MArrayT: ... +) -> MArrayT: ... @overload def clip( a: ArrayLike, @@ -3383,29 +4412,29 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... +) -> _Masked1D[np.float64]: ... @overload -def empty( +def empty[DTypeT: np.dtype]( shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], _DTypeT]: ... +) -> MaskedArray[tuple[int], DTypeT]: ... 
@overload -def empty( +def empty[ScalarT: np.generic]( shape: SupportsIndex, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +) -> _Masked1D[ScalarT]: ... @overload def empty( shape: SupportsIndex, @@ -3416,10 +4445,10 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int]]: ... +) -> _Masked1D[Any]: ... @overload # known shape -def empty( - shape: _AnyShapeT, +def empty[ShapeT: _Shape]( + shape: ShapeT, dtype: None = None, order: _OrderCF = "C", *, @@ -3427,32 +4456,32 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +) -> MaskedArray[ShapeT, np.dtype[np.float64]]: ... @overload -def empty( - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def empty( - shape: _AnyShapeT, - dtype: type[_ScalarT], +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... 
@overload -def empty( - shape: _AnyShapeT, +def empty[ShapeT: _Shape]( + shape: ShapeT, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, @@ -3460,9 +4489,9 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT]: ... +) -> MaskedArray[ShapeT]: ... @overload # unknown shape -def empty( +def empty[ShapeT: _Shape]( shape: _ShapeLike, dtype: None = None, order: _OrderCF = "C", @@ -3473,27 +4502,27 @@ def empty( hardmask: bool = False, ) -> _MaskedArray[np.float64]: ... @overload -def empty( +def empty[DTypeT: np.dtype]( shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShape, _DTypeT]: ... +) -> MaskedArray[_AnyShape, DTypeT]: ... @overload -def empty( +def empty[ScalarT: np.generic]( shape: _ShapeLike, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def empty( shape: _ShapeLike, @@ -3507,8 +4536,8 @@ def empty( # keep in sync with `_core.multiarray.empty_like` @overload -def empty_like( - a: _MArrayT, +def empty_like[MArrayT: MaskedArray]( + a: MArrayT, /, dtype: None = None, order: _OrderKACF = "K", @@ -3516,10 +4545,10 @@ def empty_like( shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MArrayT: ... +) -> MArrayT: ... 
@overload -def empty_like( - a: _ArrayLike[_ScalarT], +def empty_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, dtype: None = None, order: _OrderKACF = "K", @@ -3527,18 +4556,18 @@ def empty_like( shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def empty_like( +def empty_like[ScalarT: np.generic]( a: Incomplete, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: bool = True, shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def empty_like( a: Incomplete, @@ -3570,14 +4599,14 @@ def frombuffer( like: _SupportsArrayFunc | None = None, ) -> _MaskedArray[np.float64]: ... @overload -def frombuffer( +def frombuffer[ScalarT: np.generic]( buffer: Buffer, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = -1, offset: SupportsIndex = 0, *, like: _SupportsArrayFunc | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def frombuffer( buffer: Buffer, @@ -3589,14 +4618,14 @@ def frombuffer( ) -> _MaskedArray[Incomplete]: ... # keep roughly in sync with `_core.numeric.fromfunction` -def fromfunction( - function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], +def fromfunction[ShapeT: _Shape, DTypeT: np.dtype]( + function: Callable[..., np.ndarray[ShapeT, DTypeT]], shape: Sequence[int], *, dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, **kwargs: object, -) -> MaskedArray[_ShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... # keep roughly in sync with `_core.numeric.identity` @overload @@ -3609,14 +4638,14 @@ def identity( hardmask: bool = False, ) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... 
@overload -def identity( +def identity[ScalarT: np.generic]( n: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... +) -> MaskedArray[tuple[int, int], np.dtype[ScalarT]]: ... @overload def identity( n: int, @@ -3656,23 +4685,23 @@ def indices( hardmask: bool = False, ) -> tuple[_MaskedArray[np.intp], ...]: ... @overload -def indices( +def indices[ScalarT: np.generic]( dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], sparse: Literal[False] = False, *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def indices( +def indices[ScalarT: np.generic]( dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], sparse: Literal[True], *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> tuple[_MaskedArray[_ScalarT], ...]: ... +) -> tuple[_MaskedArray[ScalarT], ...]: ... @overload def indices( dimensions: Sequence[int], @@ -3703,13 +4732,13 @@ def indices( # keep roughly in sync with `_core.fromnumeric.squeeze` @overload -def squeeze( - a: _ArrayLike[_ScalarT], +def squeeze[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... 
@overload def squeeze( a: ArrayLike, diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 7387d4f9beb7..769c38fdc900 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1078,7 +1078,7 @@ def mask_rowcols(a, axis=None): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1135,7 +1135,7 @@ def mask_rows(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1186,7 +1186,7 @@ def mask_cols(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 70881bd15c8a..232d040360ea 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,15 +1,33 @@ -from _typeshed import Incomplete -from collections.abc import Sequence -from typing import SupportsIndex, TypeAlias, TypeVar, overload +from _typeshed import Incomplete, SupportsLenAndGetItem +from collections.abc import Callable, Iterator, Sequence +from typing import ( + Any, + Concatenate, + Final, + Literal as L, + SupportsIndex, + TypeVar, + overload, + override, +) import numpy as np from numpy import _CastingKind +from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, + NDArray, _AnyShape, _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _DTypeLike, + _NestedSequence, + _NumberLike_co, + _Shape, _ShapeLike, ) from numpy.lib._function_base_impl import average @@ -66,24 +84,51 @@ __all__ = [ "vstack", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) -_MArrayT = TypeVar("_MArrayT", 
bound=MaskedArray) +type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _MArray1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] +type _MArray2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +type _IntArray = NDArray[np.intp] +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble +type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] -_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]] +# helper aliases for polyfit +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[np.float64], NDArray[np.int32], NDArray[np.float64], NDArray[np.float64]] + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) +# which could result in, for example, `int64` and `float64` producing a +# `number[_64Bit]` array +_AnyScalarT = TypeVar( + "_AnyScalarT", + np.bool, + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip ### # keep in sync with `numpy._core.shape_base.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... 
+def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_1d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -93,13 +138,15 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_2d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -109,13 +156,15 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.atleast_2d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... 
@overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_3d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -125,19 +174,19 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], @@ -150,19 +199,19 @@ row_stack = vstack # keep in sync with `numpy._core.shape_base.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... 
@overload def hstack( tup: Sequence[ArrayLike], @@ -173,35 +222,35 @@ def hstack( # keep in sync with `numpy._core.shape_base_impl.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... # keep in sync with `numpy._core.shape_base_impl.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... @overload def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... # keep in sync with `numpy._core.shape_base.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], axis: SupportsIndex = 0, out: None = None, *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], @@ -212,86 +261,507 @@ def stack( casting: _CastingKind = "same_kind" ) -> _MArray[Incomplete]: ... @overload -def stack( +def stack[MArrayT: MaskedArray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _MArrayT, + out: MArrayT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def stack( +def stack[MArrayT: MaskedArray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _MArrayT, + out: MArrayT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _MArrayT: ... 
+) -> MArrayT: ... # keep in sync with `numpy._core.shape_base_impl.hsplit` @overload -def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ... +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[ScalarT]]: ... @overload def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... # keep in sync with `numpy._core.twodim_base_impl.hsplit` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray[ScalarT]: ... @overload def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... -# TODO: everything below - -def count_masked(arr, axis=None): ... -def masked_all(shape, dtype=float): ... # noqa: PYI014 -def masked_all_like(arr): ... - -def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... -def apply_over_axes(func, a, axes): ... -def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... -def compress_nd(x, axis=None): ... -def compress_rowcols(x, axis=None): ... -def compress_rows(a): ... -def compress_cols(a): ... -def mask_rows(a, axis=...): ... -def mask_cols(a, axis=...): ... -def ediff1d(arr, to_end=None, to_begin=None): ... -def unique(ar1, return_index=False, return_inverse=False): ... -def intersect1d(ar1, ar2, assume_unique=False): ... -def setxor1d(ar1, ar2, assume_unique=False): ... -def in1d(ar1, ar2, assume_unique=False, invert=False): ... -def isin(element, test_elements, assume_unique=False, invert=False): ... -def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=False): ... -def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... -def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... +# +def count_masked(arr: ArrayLike, axis: SupportsIndex | None = None) -> NDArray[np.intp]: ... 
+ +# +@overload +def masked_all[ScalarT: np.generic](shape: _ShapeLike, dtype: _DTypeLike[ScalarT]) -> _MArray[ScalarT]: ... +@overload +def masked_all(shape: _ShapeLike, dtype: DTypeLike = float) -> _MArray[Incomplete]: ... + +# +@overload +def masked_all_like[ScalarT: np.generic](arr: _ArrayLike[ScalarT]) -> _MArray[ScalarT]: ... +@overload +def masked_all_like(arr: ArrayLike) -> _MArray[Incomplete]: ... + +# +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[MaskedArray, Tss], ArrayLike], + axis: SupportsIndex, + arr: ArrayLike, + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> _MArray[Incomplete]: ... + +# +@overload +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[MaskedArray, int], _ArrayLike[ScalarT]], + a: np.ndarray, + axes: _ShapeLike, +) -> _MArray[ScalarT]: ... +@overload +def apply_over_axes( + func: Callable[[MaskedArray, int], ArrayLike], + a: np.ndarray, + axes: _ShapeLike, +) -> _MArray[Incomplete]: ... + +# keep in sync with `lib._function_base_impl.median` +@overload # known scalar-type, keepdims=False (default) +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> ScalarT: ... +@overload # float array-like, keepdims=False (default) +def median( + a: _ArrayLikeInt_co | _NestedSequence[float] | float, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) +def median( + a: _ListSeqND[complex], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) +def median( + a: complex, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128 | Any: ... 
+@overload # known array-type, keepdims=True +def median[ArrayT: _MArray[_ScalarNumeric]]( + a: ArrayT, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> ArrayT: ... +@overload # known scalar-type, keepdims=True +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[ScalarT]: ... +@overload # known scalar-type, axis= +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[ScalarT]: ... +@overload # float array-like, keepdims=True +def median( + a: _NestedSequence[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _NestedSequence[float], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[np.float64]: ... +@overload # complex array-like, keepdims=True +def median( + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[np.complex128]: ... +@overload # out= (keyword) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... 
+@overload # out= (positional) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... + +# +@overload +def compress_nd[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +@overload +def compress_nd(x: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Incomplete]: ... + +# +@overload +def compress_rowcols[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: int | None = None) -> _Array2D[ScalarT]: ... +@overload +def compress_rowcols(x: ArrayLike, axis: int | None = None) -> _Array2D[Incomplete]: ... + +# +@overload +def compress_rows[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... +@overload +def compress_rows(a: ArrayLike) -> _Array2D[Incomplete]: ... + +# +@overload +def compress_cols[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... +@overload +def compress_cols(a: ArrayLike) -> _Array2D[Incomplete]: ... + +# +def mask_rowcols(a: ArrayLike, axis: SupportsIndex | None = None) -> _MArray[Incomplete]: ... +def mask_rows(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... +def mask_cols(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.ediff1d` +@overload +def ediff1d( + arr: _ArrayLikeBool_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[np.int8]: ... +@overload +def ediff1d[NumericT: _ScalarNumeric]( + arr: _ArrayLike[NumericT], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[NumericT]: ... 
+@overload +def ediff1d( + arr: _ArrayLike[np.datetime64[Any]], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[np.timedelta64]: ... +@overload +def ediff1d( + arr: _ArrayLikeComplex_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.unique`, minus `return_counts` +@overload # known scalar-type, FF +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, +) -> _MArray[ScalarT]: ... +@overload # unknown scalar-type, FF +def unique( + ar1: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, +) -> _MArray[Incomplete]: ... +@overload # known scalar-type, TF +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[False] = False, +) -> tuple[_MArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, TFF +def unique( + ar1: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, +) -> tuple[_MArray[Incomplete], _IntArray]: ... +@overload # known scalar-type, FT (positional) +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False], + return_inverse: L[True], +) -> tuple[_MArray[ScalarT], _IntArray]: ... +@overload # known scalar-type, FT (keyword) +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], +) -> tuple[_MArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FT (positional) +def unique( + ar1: ArrayLike, + return_index: L[False], + return_inverse: L[True], +) -> tuple[_MArray[Incomplete], _IntArray]: ... +@overload # unknown scalar-type, FT (keyword) +def unique( + ar1: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], +) -> tuple[_MArray[Incomplete], _IntArray]: ... 
+@overload # known scalar-type, TT +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[True], +) -> tuple[_MArray[ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TT +def unique( + ar1: ArrayLike, + return_index: L[True], + return_inverse: L[True], +) -> tuple[_MArray[Incomplete], _IntArray, _IntArray]: ... + +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# keep in sync with `lib._arraysetops_impl.intersect1d` +@overload # known scalar-type, return_indices=False (default) +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload # unknown scalar-type, return_indices=False (default) +def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.setxor1d` +@overload +def setxor1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.union1d` +@overload +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _MArray1D[_AnyScalarT]: ... # noqa: UP047 +@overload +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.setdiff1d` +@overload +def setdiff1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... 
+ +# +def in1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, invert: bool = False) -> _MArray1D[np.bool]: ... + +# keep in sync with `lib._arraysetops_impl.isin` +def isin( + element: ArrayLike, test_elements: ArrayLike, assume_unique: bool = False, invert: bool = False +) -> _MArray[np.bool]: ... + +# keep in sync with `corrcoef` +def cov( + x: ArrayLike, + y: ArrayLike | None = None, + rowvar: bool = True, + bias: bool = False, + allow_masked: bool = True, + ddof: int | None = None +) -> _MArray[Incomplete]: ... + +# keep in sync with `cov` +def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True, allow_masked: bool = True) -> _MArray[Incomplete]: ... class MAxisConcatenator(AxisConcatenator): __slots__ = () + # keep in sync with `ma.core.concatenate` + @override # type: ignore[override] + @overload @staticmethod - def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MArray[ScalarT]: ... # pyrefly: ignore[bad-override] + @overload + @staticmethod + def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MArray[Incomplete]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + @override @classmethod - def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] + def makemat(cls, /, arr: ArrayLike) -> _MArray[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): __slots__ = () - def __init__(self) -> None: ... -mr_: mr_class +mr_: Final[mr_class] = ... -def ndenumerate(a, compressed=True): ... -def flatnotmasked_edges(a): ... -def notmasked_edges(a, axis=None): ... -def flatnotmasked_contiguous(a): ... 
-def notmasked_contiguous(a, axis=None): ... -def clump_unmasked(a): ... -def clump_masked(a): ... -def vander(x, n=None): ... -def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... +# +@overload +def ndenumerate[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[ScalarT]], + compressed: bool = True, +) -> Iterator[tuple[ShapeT, ScalarT]]: ... +@overload +def ndenumerate[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + compressed: bool = True, +) -> Iterator[tuple[_AnyShape, ScalarT]]: ... +@overload +def ndenumerate( + a: ArrayLike, + compressed: bool = True, +) -> Iterator[tuple[_AnyShape, Incomplete]]: ... + +# +@overload +def flatnotmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array1D[ScalarT] | None: ... +@overload +def flatnotmasked_edges(a: ArrayLike) -> _Array1D[Incomplete] | None: ... + +# +@overload +def notmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT] | None: ... +@overload +def notmasked_edges(a: ArrayLike, axis: None = None) -> _Array1D[Incomplete] | None: ... +@overload +def notmasked_edges(a: ArrayLike, axis: SupportsIndex) -> Incomplete: ... + +# +def flatnotmasked_contiguous(a: ArrayLike) -> list[slice[int, int, None]]: ... # -def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... +@overload +def notmasked_contiguous(a: ArrayLike, axis: None = None) -> list[slice[int, int, None]]: ... +@overload +def notmasked_contiguous(a: ArrayLike, axis: SupportsIndex) -> list[Incomplete]: ... + +# +def _ezclump(mask: np.ndarray) -> list[slice[int, int, None]]: ... # undocumented +def clump_unmasked(a: np.ndarray) -> list[slice[int, int, None]]: ... +def clump_masked(a: np.ndarray) -> list[slice[int, int, None]]: ... + +# keep in sync with `lib._twodim_base_impl.vander` +@overload +def vander[ScalarT: np.number | np.object_](x: _ArrayLike[ScalarT], n: int | None = None) -> _Array2D[ScalarT]: ... 
+@overload +def vander(x: _ArrayLike[np.bool] | list[int], n: int | None = None) -> _Array2D[np.int_]: ... +@overload +def vander(x: list[float], n: int | None = None) -> _Array2D[np.float64]: ... +@overload +def vander(x: list[complex], n: int | None = None) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], n: int | None = None) -> _Array2D[Any]: ... + +# keep roughly in sync with `lib._polynomial_impl.polyfit` +@overload # float dtype, cov: False (default) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False +) -> NDArray[np.float64]: ... +@overload # float dtype, cov: True | "unscaled" (keyword) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[np.float64]]: ... +@overload # float dtype, full: True (keyword) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[np.float64]]: ... +@overload # complex dtype, cov: False (default) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False +) -> NDArray[Incomplete]: ... +@overload # complex dtype, cov: True | "unscaled" (keyword) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[np.complex128 | Any]]: ... 
+@overload # complex dtype, full: True (keyword) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[np.complex128 | Any]]: ... diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index bb4a2707fec1..d35bb9b79925 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -385,7 +385,7 @@ def view(self, dtype=None, type=None): if (getattr(output, '_mask', ma.nomask) is not ma.nomask): mdtype = ma.make_mask_descr(output.dtype) output._mask = self._mask.view(mdtype, np.ndarray) - output._mask.shape = output.shape + output._mask = output._mask.reshape(output.shape) return output def harden_mask(self): diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 737a34ebdb70..f6b5d6424044 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,96 +1,309 @@ -from typing import Any, Generic +from _typeshed import Incomplete, StrPath, SupportsReadline +from collections.abc import Buffer, Sequence +from typing import IO, Any, Generic, Self, SupportsIndex, overload, override from typing_extensions import TypeVar import numpy as np -from numpy._typing import _AnyShape +from numpy import _ByteOrder, _ToIndices +from numpy._typing import ( + ArrayLike, + DTypeLike, + _AnyShape, + _ArrayLikeBool_co, + _DTypeLike, + _HasDType, + _ScalarLike_co, + _Shape, + _ShapeLike, + _VoidDTypeLike, +) from .core import MaskedArray -__all__ = [ - "MaskedRecords", - "mrecarray", - "fromarrays", - "fromrecords", - "fromtextfile", - "addfield", -] +__all__ = ["MaskedRecords", "mrecarray", "fromarrays", "fromrecords", "fromtextfile", "addfield"] -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +### + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, 
covariant=True) +type _Ignored = object +type _Names = str | Sequence[str] + +### +# mypy: disable-error-code=no-untyped-def + class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): - def __new__( - cls, - shape, - dtype=..., - buf=..., - offset=..., - strides=..., - formats=..., - names=..., - titles=..., - byteorder=..., - aligned=..., - mask=..., - hard_mask=..., - fill_value=..., - keep_mask=..., - copy=..., - **options, - ): ... _mask: Any _fill_value: Any + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike | None = None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + mask: _ArrayLikeBool_co = ..., + hard_mask: bool = False, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + copy: bool = False, + **options: _Ignored, + ) -> Self: ... + + # @property - def _data(self): ... + @override + def _data(self, /) -> np.recarray[_ShapeT_co, _DTypeT_co]: ... @property - def _fieldmask(self): ... - def __array_finalize__(self, obj): ... - def __len__(self): ... - def __getattribute__(self, attr): ... - def __setattr__(self, attr, val): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - def view(self, dtype=None, type=None): ... - def harden_mask(self): ... - def soften_mask(self): ... - def copy(self): ... - def tolist(self, fill_value=None): ... - def __reduce__(self): ... + def _fieldmask(self, /) -> np.ndarray[_ShapeT_co, np.dtype[np.bool]] | np.bool: ... + + # + @override + def __array_finalize__(self, obj: np.ndarray) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __getitem__(self, indx: str | _ToIndices, /) -> Incomplete: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __setitem__(self, indx: str | _ToIndices, value: Incomplete, /) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # unlike `MaskedArray`, these two methods don't return `Self` + @override + def harden_mask(self) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def soften_mask(self) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `MaskedArray.view`, but without the `fill_value` + @override # type: ignore[override] + @overload # () + def view(self, /, dtype: None = None, type: None = None) -> Self: ... # pyrefly: ignore[bad-override] + @overload # (dtype: DTypeT) + def view[DTypeT: np.dtype]( + self, /, dtype: DTypeT | _HasDType[DTypeT], type: None = None + ) -> MaskedRecords[_ShapeT_co, DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view[ScalarT: np.generic]( + self, /, dtype: _DTypeLike[ScalarT], type: None = None + ) -> MaskedRecords[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view[ArrayT: np.ndarray](self, /, dtype: DTypeLike | None = None, *, type: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view[ArrayT: np.ndarray](self, /, dtype: DTypeLike | None, type: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: ArrayT, /) + def view[ArrayT: np.ndarray](self, /, dtype: type[ArrayT], type: None = None) -> ArrayT: ... + @overload # (dtype: ) + def view(self, /, dtype: _VoidDTypeLike | str | None, type: None = None) -> MaskedRecords[_ShapeT_co, np.dtype]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # unlike `MaskedArray` and `ndarray`, this `copy` method has no `order` parameter + @override + def copy(self, /) -> Self: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] mrecarray = MaskedRecords +@overload # known dtype, known shape +def fromarrays[DTypeT: np.dtype, ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeT | _HasDType[DTypeT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT, DTypeT]: ... +@overload # known dtype, unknown shape +def fromarrays[DTypeT: np.dtype]( + arraylist: Sequence[ArrayLike], + dtype: DTypeT | _HasDType[DTypeT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[_AnyShape, DTypeT]: ... +@overload # known scalar-type, known shape +def fromarrays[ScalarT: np.generic, ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: _DTypeLike[ScalarT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT, np.dtype[ScalarT]]: ... +@overload # known scalar-type, unknown shape +def fromarrays[ScalarT: np.generic]( + arraylist: Sequence[ArrayLike], + dtype: _DTypeLike[ScalarT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... 
+@overload # unknown dtype, known shape (positional) +def fromarrays[ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT]: ... +@overload # unknown dtype, known shape (keyword) +def fromarrays[ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None = None, + *, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT]: ... +@overload # unknown dtype, unknown shape def fromarrays( - arraylist, - dtype=None, - shape=None, - formats=None, - names=None, - titles=None, - aligned=False, - byteorder=None, - fill_value=None, -): ... + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords: ... +# +@overload # known dtype, known shape +def fromrecords[DTypeT: np.dtype, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeT, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, DTypeT]: ... 
+@overload # known dtype, unknown shape +def fromrecords[DTypeT: np.dtype]( + reclist: ArrayLike, + dtype: DTypeT, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, DTypeT]: ... +@overload # known scalar-type, known shape +def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: _DTypeLike[ScalarT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[ScalarT]]: ... +@overload # known scalar-type, unknown shape +def fromrecords[ScalarT: np.generic]( + reclist: ArrayLike, + dtype: _DTypeLike[ScalarT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... +@overload # unknown dtype, known shape (positional) +def fromrecords[ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeLike | None, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... 
+@overload # unknown dtype, known shape (keyword) +def fromrecords[ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeLike | None = None, + *, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... +@overload # unknown dtype, unknown shape def fromrecords( - reclist, - dtype=None, - shape=None, - formats=None, - names=None, - titles=None, - aligned=False, - byteorder=None, - fill_value=None, - mask=..., -): ... + reclist: ArrayLike, + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, np.dtype[Incomplete]]: ... + +# undocumented +@overload +def openfile(fname: StrPath) -> IO[str]: ... +@overload +def openfile[FileT: SupportsReadline[str]](fname: FileT) -> FileT: ... +# def fromtextfile( - fname, - delimiter=None, - commentchar="#", - missingchar="", - varnames=None, - vartypes=None, -): ... - -def addfield(mrecord, newfield, newfieldname=None): ... + fname: StrPath | SupportsReadline[str], + delimiter: str | None = None, + commentchar: str = "#", + missingchar: str = "", + varnames: Sequence[str] | None = None, + vartypes: Sequence[DTypeLike] | None = None, +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def addfield[ShapeT: _Shape]( + mrecord: MaskedRecords[ShapeT], + newfield: ArrayLike, + newfieldname: str | None = None, +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... 
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index a082f8aa7450..df26ce13c5fa 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -6,6 +6,7 @@ __author__ = "Pierre GF Gerard-Marchant" import copy +import datetime as dt import inspect import itertools import operator @@ -997,6 +998,13 @@ def test_flatten_structured_array(self): control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) + # for strings + ndtype = [('a', 'U5'), ('b', [('c', 'U5')])] + arr = np.array([('NumPy', ('array',)), ('array', ('numpy',))], dtype=ndtype) + test = flatten_structured_array(arr) + control = np.array([['NumPy', 'array'], ['array', 'numpy']], dtype='U5') + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) def test_void0d(self): # Test creating a mvoid object @@ -1371,8 +1379,7 @@ def test_minmax_reduce(self): def test_minmax_funcs_with_output(self): # Tests the min/max functions with explicit outputs mask = np.random.rand(12).round() - xm = array(np.random.uniform(0, 10, 12), mask=mask) - xm.shape = (3, 4) + xm = array(np.random.uniform(0, 10, 12), mask=mask).reshape((3, 4)) for funcname in ('min', 'max'): # Initialize npfunc = getattr(np, funcname) @@ -1394,7 +1401,7 @@ def test_minmax_funcs_with_output(self): def test_minmax_methods(self): # Additional tests on max/min xm = self._create_data()[5] - xm.shape = (xm.size,) + xm = xm.reshape((xm.size,)) assert_equal(xm.max(), 10) assert_(xm[0].max() is masked) assert_(xm[0].max(0) is masked) @@ -1506,7 +1513,10 @@ def test_addsumprod(self): assert_equal(np.prod(x, 0), product(x, 0)) assert_equal(np.prod(filled(xm, 1), axis=0), product(xm, axis=0)) s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) if len(s) > 1: assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) assert_equal(np.add.reduce(x, 1), 
add.reduce(x, 1)) @@ -2232,7 +2242,8 @@ def test_assign_dtype(self): a = np.zeros(4, dtype='f4,i4') m = np.ma.array(a) - m.dtype = np.dtype('f4') + with pytest.warns(DeprecationWarning, match="Setting the dtype.*MaskedArray"): + m.dtype = np.dtype('f4') repr(m) # raises? assert_equal(m.dtype, np.dtype('f4')) @@ -2240,7 +2251,9 @@ def test_assign_dtype(self): # are not allowed def assign(): m = np.ma.array(a) - m.dtype = np.dtype('f8') + with pytest.warns(DeprecationWarning, + match="Setting the dtype.*MaskedArray"): + m.dtype = np.dtype('f8') assert_raises(ValueError, assign) b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? @@ -2249,7 +2262,8 @@ def assign(): # check that nomask is preserved a = np.zeros(4, dtype='f4') m = np.ma.array(a) - m.dtype = np.dtype('f4,i4') + with pytest.warns(DeprecationWarning, match="Setting the dtype.*MaskedArray"): + m.dtype = np.dtype('f4,i4') assert_equal(m.dtype, np.dtype('f4,i4')) assert_equal(m._mask, np.ma.nomask) @@ -2272,6 +2286,32 @@ def test_check_on_scalar(self): assert_raises(TypeError, _check_fill_value, 1e+20, int) assert_raises(TypeError, _check_fill_value, 'stuff', int) + def test_fill_value_datetime_structured(self): + # gh-29818 + rec = np.array([(dt.date(2025, 4, 1),)], dtype=[('foo', '>> np.matlib.empty((2, 2)) # filled with random data matrix([[ 6.76425276e-320, 9.79033856e-307], # random [ 7.39337286e-309, 3.22135945e-309]]) - >>> np.matlib.empty((2, 2), dtype=int) + >>> np.matlib.empty((2, 2), dtype=np.int_) matrix([[ 6600475, 0], # random [ 6586976, 22740995]]) @@ -177,7 +177,7 @@ def identity(n, dtype=None): Examples -------- >>> import numpy.matlib - >>> np.matlib.identity(3, dtype=int) + >>> np.matlib.identity(3, dtype=np.int_) matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) @@ -222,7 +222,7 @@ def eye(n, M=None, k=0, dtype=float, order='C'): Examples -------- >>> import numpy.matlib - >>> np.matlib.eye(3, k=1, dtype=float) + >>> np.matlib.eye(3, k=1, dtype=np.float64) matrix([[0., 1., 0.], [0., 0., 1.], 
[0., 0., 0.]]) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index d653a5a6cc98..06a6806e373a 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -1,4 +1,4 @@ -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt @@ -99,6 +99,7 @@ from numpy import ( # noqa: F401 common_type, complex64, complex128, + complex192, complex256, complexfloating, compress, @@ -175,6 +176,7 @@ from numpy import ( # noqa: F401 float16, float32, float64, + float96, float128, float_power, floating, @@ -396,7 +398,6 @@ from numpy import ( # noqa: F401 roots, rot90, round, - row_stack, s_, save, savetxt, @@ -505,9 +506,8 @@ __all__ += np.__all__ ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] -_Order: TypeAlias = Literal["C", "F"] +type _Matrix[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]] +type _Order = Literal["C", "F"] ### @@ -515,7 +515,7 @@ _Order: TypeAlias = Literal["C", "F"] @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def empty[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -523,7 +523,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... 
+def ones[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -531,7 +531,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def zeros[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -539,7 +539,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT]) -> _Matrix[ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -553,9 +553,9 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def eye[ScalarT: np.generic](n: int, M: int | None, k: int, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def eye[ScalarT: np.generic](n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... 
@overload def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... @@ -573,8 +573,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... +def repmat[ScalarT: np.generic](a: _Matrix[ScalarT], m: int, n: int) -> _Matrix[ScalarT]: ... @overload -def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... +def repmat[ScalarT: np.generic](a: _ArrayLike[ScalarT], m: int, n: int) -> npt.NDArray[ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 39b9a935500e..2e63d50cb3a6 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -116,7 +116,7 @@ class matrix(N.ndarray): """ __array_priority__ = 10.0 - def __new__(subtype, data, dtype=None, copy=True): + def __new__(cls, data, dtype=None, copy=True): warnings.warn('the matrix subclass is not the recommended way to ' 'represent matrices or deal with linear algebra (see ' 'https://docs.scipy.org/doc/numpy/user/' @@ -136,7 +136,7 @@ def __new__(subtype, data, dtype=None, copy=True): intype = data.dtype else: intype = N.dtype(dtype) - new = data.view(subtype) + new = data.view(cls) if intype != data.dtype: return new.astype(intype) if copy: @@ -166,9 +166,7 @@ def __new__(subtype, data, dtype=None, copy=True): if not (order or arr.flags.contiguous): arr = arr.copy() - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) + ret = N.ndarray.__new__(cls, shape, arr.dtype, buffer=arr, order=order) return ret def __array_finalize__(self, obj): @@ -182,16 +180,16 @@ def __array_finalize__(self, obj): newshape = tuple(x for x in self.shape if x > 1) ndim = len(newshape) if ndim == 2: - self.shape = newshape + self._set_shape(newshape) return elif (ndim > 2): raise 
ValueError("shape too large to be a matrix.") else: newshape = self.shape if ndim == 0: - self.shape = (1, 1) + self._set_shape((1, 1)) elif ndim == 1: - self.shape = (1, newshape[0]) + self._set_shape((1, newshape[0])) return def __getitem__(self, index): @@ -215,9 +213,9 @@ def __getitem__(self, index): except Exception: n = 0 if n > 1 and isscalar(index[1]): - out.shape = (sh, 1) + out = out.reshape((sh, 1)) else: - out.shape = (1, sh) + out = out.reshape((1, sh)) return out def __mul__(self, other): @@ -315,11 +313,11 @@ def sum(self, axis=None, dtype=None, out=None): >>> x.sum(axis=1) matrix([[3], [7]]) - >>> x.sum(axis=1, dtype='float') + >>> x.sum(axis=1, dtype=np.float64) matrix([[3.], [7.]]) - >>> out = np.zeros((2, 1), dtype='float') - >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + >>> out = np.zeros((2, 1), dtype=np.float64) + >>> x.sum(axis=1, dtype=np.float64, out=np.asmatrix(out)) matrix([[3.], [7.]]) diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 40c747d1ae3d..5ec4b6687755 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from collections.abc import Mapping, Sequence from types import EllipsisType -from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, overload from typing_extensions import TypeVar import numpy as np @@ -17,27 +17,20 @@ from numpy._typing import ( __all__ = ["asmatrix", "bmat", "matrix"] -_T = TypeVar("_T") -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_2D: TypeAlias = tuple[int, int] -_Matrix: TypeAlias = matrix[_2D, 
np.dtype[_ScalarT]] -_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None -_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] +type _2D = tuple[int, int] +type _Matrix[ScalarT: np.generic] = matrix[_2D, np.dtype[ScalarT]] +type _ToIndex1 = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +type _ToIndex2 = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +### class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] - def __new__( - subtype, # pyright: ignore[reportSelfClsParameterName] - data: ArrayLike, - dtype: DTypeLike | None = None, - copy: bool = True, - ) -> _Matrix[Incomplete]: ... + def __new__(cls, data: ArrayLike, dtype: DTypeLike | None = None, copy: bool = True) -> _Matrix[Incomplete]: ... # @overload # type: ignore[override] @@ -57,7 +50,7 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): # def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # keep in sync with `prod` and `mean` @overload # type: ignore[override] @@ -65,9 +58,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... 
@overload - def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `sum` and `mean` @overload # type: ignore[override] @@ -75,9 +68,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... @overload - def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `sum` and `prod` @overload # type: ignore[override] @@ -85,9 +78,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... @overload - def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `var` @overload # type: ignore[override] @@ -95,11 +88,11 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... @overload - def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + def std[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... @overload - def std( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... + def std[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... # keep in sync with `std` @overload # type: ignore[override] @@ -107,11 +100,11 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... @overload - def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + def var[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... @overload - def var( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... + def var[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... 
# keep in sync with `all` @overload # type: ignore[override] @@ -119,9 +112,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... @overload - def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def any[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def any[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `any` @overload # type: ignore[override] @@ -129,70 +122,70 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... @overload - def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def all[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def all[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `min` and `ptp` @overload # type: ignore[override] - def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def max[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def max[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + def max[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `max` and `ptp` @overload # type: ignore[override] - def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def min[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def min[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def min[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `max` and `min` @overload - def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def ptp[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `argmin` @overload # type: ignore[override] - def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... 
+ def argmax[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... @overload def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... @overload - def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `argmax` @overload # type: ignore[override] - def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + def argmin[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... @overload def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... @overload - def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # the second overload handles the (rare) case that the matrix is not 2-d @overload - def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + def tolist[T](self: _Matrix[np.generic[T]]) -> list[list[T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] @overload def tolist(self) -> Incomplete: ... 
# pyright: ignore[reportIncompatibleMethodOverride] # these three methods will at least return a `2-d` array of shape (1, n) def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... - def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # matrix.T is inherited from _ScalarOrArrayCommon def getT(self) -> Self: ... diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index a0e868f5fe2c..397767037b91 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -323,7 +323,7 @@ def test_basic(self): class TestNewScalarIndexing: a = matrix([[1, 2], [3, 4]]) - def test_dimesions(self): + def test_dimensions(self): a = self.a x = a[0] assert_equal(x.ndim, 2) @@ -373,15 +373,13 @@ def test_row_column_indexing(self): assert_array_equal(x[:, 1], [[0], [1]]) def test_boolean_indexing(self): - A = np.arange(6) - A.shape = (3, 2) + A = np.arange(6).reshape((3, 2)) x = asmatrix(A) assert_array_equal(x[:, np.array([True, False])], x[:, 0]) assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) def test_list_indexing(self): - A = np.arange(6) - A.shape = (3, 2) + A = np.arange(6).reshape((3, 2)) x = asmatrix(A) assert_array_equal(x[:, [1, 0]], x[:, ::-1]) assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) @@ -453,3 +451,25 @@ def test_expand_dims_matrix(self): expanded = np.expand_dims(a, axis=1) assert_equal(expanded.ndim, 3) 
assert_(not isinstance(expanded, np.matrix)) + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_2d(self): + # matrix is always 2D, so rows are (1, N) matrices not 1D arrays + arr = matrix([[1, 2], [3, 4]]) + # outer matching + match arr: + case [row1, row2]: + assert_array_equal(row1, [[1, 2]]) + assert_array_equal(row2, [[3, 4]]) + case _: + raise AssertionError("2D matrix did not match sequence pattern") + # inner matching - rows are still 2D matrices, not scalars + match arr: + case [[a], [b]]: + assert_array_equal(a, [[1, 2]]) + assert_array_equal(b, [[3, 4]]) + case _: + raise AssertionError("2D matrix did not match sequence pattern") diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index b16b06c8a734..2fdfd24db7a9 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,16 +1,7 @@ import abc import decimal from collections.abc import Iterator, Sequence -from typing import ( - Any, - ClassVar, - Generic, - Literal, - Self, - SupportsIndex, - TypeAlias, - overload, -) +from typing import Any, ClassVar, Generic, Literal, Self, SupportsIndex, overload from typing_extensions import TypeIs, TypeVar import numpy as np @@ -38,10 +29,12 @@ from ._polytypes import ( __all__ = ["ABCPolyBase"] _NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) -_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) -_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co -class ABCPolyBase(Generic[_NameT_co], abc.ABC): +type _AnyOther = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co + +### + +class ABCPolyBase(Generic[_NameT_co], abc.ABC): # noqa: UP046 __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] __array_ufunc__: ClassVar[None] = None maxpower: ClassVar[Literal[100]] = 100 @@ -76,7 +69,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): # 
@overload - def __call__(self, /, arg: _PolyT) -> _PolyT: ... + def __call__[PolyT: ABCPolyBase](self, /, arg: PolyT) -> PolyT: ... @overload def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... @overload @@ -134,22 +127,22 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): # @overload - def convert( + def convert[PolyT: ABCPolyBase]( self, /, domain: _SeriesLikeCoef_co | None, - kind: type[_PolyT], + kind: type[PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _PolyT: ... + ) -> PolyT: ... @overload - def convert( + def convert[PolyT: ABCPolyBase]( self, /, domain: _SeriesLikeCoef_co | None = None, *, - kind: type[_PolyT], + kind: type[PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _PolyT: ... + ) -> PolyT: ... @overload def convert( self, diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index b5a603b6ca85..ce37bcf59c50 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,5 +1,3 @@ -# ruff: noqa: PYI046 - from collections.abc import Sequence from typing import ( Any, @@ -9,8 +7,6 @@ from typing import ( Self, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, type_check_only, ) @@ -19,75 +15,75 @@ import numpy as np import numpy.typing as npt from numpy._typing import ( _ArrayLikeComplex_co, - # array-likes _ArrayLikeFloat_co, _ArrayLikeNumber_co, _ComplexLike_co, _FloatLike_co, - # scalar-likes _IntLike_co, _NestedSequence, _NumberLike_co, _SupportsArray, ) -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) - # compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase @type_check_only -class _SupportsCoefOps(Protocol[_T_contra]): +class _SupportsCoefOps[T](Protocol): def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... 
- def __add__(self, x: _T_contra, /) -> Self: ... - def __sub__(self, x: _T_contra, /) -> Self: ... - def __mul__(self, x: _T_contra, /) -> Self: ... - def __pow__(self, x: _T_contra, /) -> Self | float: ... - def __radd__(self, x: _T_contra, /) -> Self: ... - def __rsub__(self, x: _T_contra, /) -> Self: ... - def __rmul__(self, x: _T_contra, /) -> Self: ... + def __add__(self, x: T, /) -> Self: ... + def __sub__(self, x: T, /) -> Self: ... + def __mul__(self, x: T, /) -> Self: ... + def __pow__(self, x: T, /) -> Self | float: ... + def __radd__(self, x: T, /) -> Self: ... + def __rsub__(self, x: T, /) -> Self: ... + def __rmul__(self, x: T, /) -> Self: ... + +type _PolyScalar = np.bool | np.number | np.object_ -_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _Series[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_FloatSeries: TypeAlias = _Series[np.floating] -_ComplexSeries: TypeAlias = _Series[np.complexfloating] -_ObjectSeries: TypeAlias = _Series[np.object_] -_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] +type _FloatSeries = _Series[np.floating] +type _ComplexSeries = _Series[np.complexfloating] +type _ObjectSeries = _Series[np.object_] +type _CoefSeries = _Series[np.inexact | np.object_] -_FloatArray: TypeAlias = npt.NDArray[np.floating] -_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] -_ObjectArray: TypeAlias = npt.NDArray[np.object_] -_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] +type _FloatArray = npt.NDArray[np.floating] +type _ComplexArray = npt.NDArray[np.complexfloating] +type _ObjectArray = npt.NDArray[np.object_] +type _CoefArray = npt.NDArray[np.inexact | np.object_] -_Tuple2: TypeAlias = tuple[_T, _T] -_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] -_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] +type _Tuple2[_T] = tuple[_T, _T] +type _Array1[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[1]], np.dtype[ScalarT]] +type 
_Array2[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[2]], np.dtype[ScalarT]] -_AnyInt: TypeAlias = SupportsInt | SupportsIndex +type _AnyInt = SupportsInt | SupportsIndex -_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] -_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co +type _CoefObjectLike_co = np.object_ | _SupportsCoefOps[Any] +type _CoefLike_co = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. -_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] -_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] -_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] -_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] -_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] -_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] +type _SeriesLikeBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +type _SeriesLikeInt_co = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +type _SeriesLikeFloat_co = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +type _SeriesLikeComplex_co = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +type _SeriesLikeObject_co = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +type _SeriesLikeCoef_co = _SupportsArray[np.dtype[_PolyScalar]] | Sequence[_CoefLike_co] -_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] -_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co +type 
_ArrayLikeCoefObject_co = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +type _ArrayLikeCoef_co = npt.NDArray[_PolyScalar] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co -_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _Line[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Companion[ScalarT: _PolyScalar] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +type _AnyDegrees = Sequence[SupportsIndex] +type _FullFitResult = Sequence[np.inexact | np.int32] @type_check_only class _FuncLine(Protocol): @overload - def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + def __call__[ScalarT: _PolyScalar](self, /, off: ScalarT, scl: ScalarT) -> _Line[ScalarT]: ... @overload def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... @overload @@ -297,6 +293,51 @@ class _FuncVal3D(Protocol): c: _SeriesLikeCoef_co, ) -> _SupportsCoefOps[Any]: ... +@type_check_only +class _FuncValND(Protocol): + @overload + def __call__( + self, + /, + pts: Sequence[_FloatLike_co], + c: _SeriesLikeFloat_co, + ) -> np.floating: ... + @overload + def __call__( + self, + /, + pts: Sequence[_NumberLike_co], + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + pts: Sequence[_ArrayLikeFloat_co], + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + pts: Sequence[_ArrayLikeComplex_co], + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + pts: Sequence[_ArrayLikeCoef_co], + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + pts: Sequence[_CoefLike_co], + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + @type_check_only class _FuncVander(Protocol): @overload @@ -308,8 +349,6 @@ class _FuncVander(Protocol): @overload def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... 
-_AnyDegrees: TypeAlias = Sequence[SupportsIndex] - @type_check_only class _FuncVander2D(Protocol): @overload @@ -360,8 +399,6 @@ class _FuncVander3D(Protocol): deg: _AnyDegrees, ) -> _CoefArray: ... -_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] - @type_check_only class _FuncFit(Protocol): @overload @@ -476,8 +513,6 @@ class _FuncRoots(Protocol): @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... -_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] - @type_check_only class _FuncCompanion(Protocol): @overload diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 55b48b905848..653451f2b459 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -44,6 +44,7 @@ chebval chebval2d chebval3d + chebvalnd chebgrid2d chebgrid3d @@ -117,7 +118,7 @@ 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', - 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebvalnd', 'chebgrid2d', 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', 'chebgauss', 'chebweight', 'chebinterpolate'] @@ -1301,6 +1302,57 @@ def chebval3d(x, y, z, c): return pu._valnd(chebval, c, x, y, z) +def chebvalnd(pts, c): + r""" + Evaluate an N-D Chebyshev series at points. + + This function returns the values: + + .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * T_{i_1}(x_1) * T_{i_2}(x_2) \dots T_{i_n}(x_n) + + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. 
In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional Chebyshev series on points formed + with N-tuples of corresponding values from `pts`. + + See Also + -------- + chebval, chebval2d, chebval3d + + """ + return pu._valnd(chebval, c, *pts) + + def chebgrid3d(x, y, z, c): """ Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. @@ -1420,7 +1472,7 @@ def chebvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 1cfb27829b2e..319dcdc6dc4a 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,15 +1,6 @@ from _typeshed import ConvertibleToInt from collections.abc import Callable, Iterable -from typing import ( - Any, - ClassVar, - Concatenate, - Final, - Literal as L, - Self, - TypeVar, - overload, -) +from typing import Any, ClassVar, Concatenate, Final, Literal as L, Self, overload import numpy as np import numpy.typing as npt @@ -35,6 +26,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -71,6 +63,7 @@ __all__ = [ "Chebyshev", "chebval2d", "chebval3d", + "chebvalnd", "chebgrid2d", "chebgrid3d", "chebvander2d", @@ -81,15 +74,14 @@ __all__ = [ "chebinterpolate", ] -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) +### -def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _cseries_to_zseries[ScalarT: np.number | np.object_](c: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_to_cseries[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... 
+def _zseries_mul[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_div[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_der[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_int[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... poly2cheb: Final[_FuncPoly2Ortho] = ... cheb2poly: Final[_FuncUnOp] = ... @@ -112,6 +104,7 @@ chebint: Final[_FuncInteg] = ... chebval: Final[_FuncVal] = ... chebval2d: Final[_FuncVal2D] = ... chebval3d: Final[_FuncVal3D] = ... +chebvalnd: Final[_FuncValND] = ... chebgrid2d: Final[_FuncVal2D] = ... chebgrid3d: Final[_FuncVal3D] = ... chebvander: Final[_FuncVander] = ... @@ -133,20 +126,20 @@ def chebinterpolate( args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload -def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[[npt.NDArray[np.float64]], CoefScalarT], deg: _IntLike_co, args: tuple[()] = (), -) -> npt.NDArray[_CoefScalarT]: ... +) -> npt.NDArray[CoefScalarT]: ... @overload -def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_CoefScalarT]: ... +) -> npt.NDArray[CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): - basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index c6007d19df7f..cb03851ca384 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -40,6 +40,7 @@ hermval hermval2d hermval3d + hermvalnd hermgrid2d hermgrid3d @@ -85,7 +86,7 @@ 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', - 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermval2d', 'hermval3d', 'hermvalnd', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] hermtrim = pu.trimcoef @@ -1056,6 +1057,57 @@ def hermval3d(x, y, z, c): return pu._valnd(hermval, c, x, y, z) +def hermvalnd(pts, c): + r""" + Evaluate an N-D Hermite series at points. + + This function returns the values: + + .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * H_{i_1}(x_1) * H_{i_2}(x_2) \dots H_{i_n}(x_n) + + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. 
+ + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + hermval, hermval2d, hermval3d + + """ + return pu._valnd(hermval, c, *pts) + + def hermgrid3d(x, y, z, c): """ Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. @@ -1198,7 +1250,7 @@ def hermvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares @@ -1543,6 +1595,10 @@ def hermroots(c): m = hermcompanion(c)[::-1, ::-1] r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 04b8238735dd..a1ddc06bf377 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,4 @@ -from typing import Any, ClassVar, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L import numpy as np from numpy._typing import _Shape @@ -22,6 +22,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -54,6 +55,7 @@ __all__ = [ "Hermite", "hermval2d", "hermval3d", + "hermvalnd", "hermgrid2d", "hermgrid3d", "hermvander2d", @@ -63,8 +65,6 @@ __all__ = [ "hermweight", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - poly2herm: Final[_FuncPoly2Ortho] = ... herm2poly: Final[_FuncUnOp] = ... @@ -86,6 +86,7 @@ hermint: Final[_FuncInteg] = ... hermval: Final[_FuncVal] = ... hermval2d: Final[_FuncVal2D] = ... hermval3d: Final[_FuncVal3D] = ... +hermvalnd: Final[_FuncValND] = ... hermgrid2d: Final[_FuncVal2D] = ... hermgrid3d: Final[_FuncVal3D] = ... hermvander: Final[_FuncVander] = ... @@ -95,12 +96,15 @@ hermfit: Final[_FuncFit] = ... hermcompanion: Final[_FuncCompanion] = ... hermroots: Final[_FuncRoots] = ... -def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... hermgauss: Final[_FuncGauss] = ... hermweight: Final[_FuncWeight] = ... 
class Hermite(ABCPolyBase[L["H"]]): - basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index f5d82aa543b9..a579f2d90407 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -40,6 +40,7 @@ hermeval hermeval2d hermeval3d + hermevalnd hermegrid2d hermegrid3d @@ -85,7 +86,7 @@ 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', - 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermevalnd', 'hermegrid2d', 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', 'hermegauss', 'hermeweight'] @@ -1020,6 +1021,57 @@ def hermeval3d(x, y, z, c): return pu._valnd(hermeval, c, x, y, z) +def hermevalnd(pts, c): + r""" + Evaluate an N-D Hermite_e series at points. + + This function returns the values: + + .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * He_{i_1}(x_1) * He_{i_2}(x_2) \dots He_{i_n}(x_n) + + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. 
+ + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d + + """ + return pu._valnd(hermeval, c, *pts) + + def hermegrid3d(x, y, z, c): """ Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. @@ -1147,7 +1199,7 @@ def hermevander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index b996de52c6da..ce201c6c1a0a 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,4 @@ -from typing import Any, ClassVar, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L import numpy as np from numpy._typing import _Shape @@ -22,6 +22,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -54,6 +55,7 @@ __all__ = [ "HermiteE", "hermeval2d", "hermeval3d", + "hermevalnd", "hermegrid2d", "hermegrid3d", "hermevander2d", @@ -63,8 +65,6 @@ __all__ = [ "hermeweight", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - poly2herme: Final[_FuncPoly2Ortho] = ... herme2poly: Final[_FuncUnOp] = ... @@ -86,6 +86,7 @@ hermeint: Final[_FuncInteg] = ... hermeval: Final[_FuncVal] = ... hermeval2d: Final[_FuncVal2D] = ... hermeval3d: Final[_FuncVal3D] = ... +hermevalnd: Final[_FuncValND] = ... hermegrid2d: Final[_FuncVal2D] = ... hermegrid3d: Final[_FuncVal3D] = ... hermevander: Final[_FuncVander] = ... @@ -95,12 +96,15 @@ hermefit: Final[_FuncFit] = ... hermecompanion: Final[_FuncCompanion] = ... hermeroots: Final[_FuncRoots] = ... -def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_e_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... hermegauss: Final[_FuncGauss] = ... hermeweight: Final[_FuncWeight] = ... class HermiteE(ABCPolyBase[L["He"]]): - basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index b1d87bf6d035..d83d3e53e72a 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -40,6 +40,7 @@ lagval lagval2d lagval3d + lagvalnd laggrid2d laggrid3d @@ -84,7 +85,7 @@ 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', - 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', 'lagvalnd', 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', 'laggauss', 'lagweight'] @@ -1045,6 +1046,57 @@ def lagval3d(x, y, z, c): return pu._valnd(lagval, c, x, y, z) +def lagvalnd(pts, c): + r""" + Evaluate an N-D Laguerre series at points. + + This function returns the values: + + .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * L_{i_1}(x_1) * L_{i_2}(x_2) \dots L_{i_n}(x_n) + + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. 
+ + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + lagval, lagval2d, lagval3d + + """ + return pu._valnd(lagval, c, *pts) + + def laggrid3d(x, y, z, c): """ Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. @@ -1525,6 +1577,10 @@ def lagroots(c): m = lagcompanion(c)[::-1, ::-1] r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 8b70b899ed59..d6fd5fcd73fb 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -21,6 +21,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -53,6 +54,7 @@ __all__ = [ "Laguerre", "lagval2d", "lagval3d", + "lagvalnd", "laggrid2d", "laggrid3d", "lagvander2d", @@ -83,6 +85,7 @@ lagint: Final[_FuncInteg] = ... lagval: Final[_FuncVal] = ... lagval2d: Final[_FuncVal2D] = ... lagval3d: Final[_FuncVal3D] = ... +lagvalnd: Final[_FuncValND] = ... laggrid2d: Final[_FuncVal2D] = ... laggrid3d: Final[_FuncVal3D] = ... 
lagvander: Final[_FuncVander] = ... @@ -95,6 +98,6 @@ laggauss: Final[_FuncGauss] = ... lagweight: Final[_FuncWeight] = ... class Laguerre(ABCPolyBase[L["L"]]): - basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 237e340cbf45..feedae981b3b 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -42,6 +42,7 @@ legval legval2d legval3d + legvalnd leggrid2d leggrid3d @@ -88,7 +89,7 @@ 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', - 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', 'legvalnd', 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', 'leggauss', 'legweight'] @@ -1043,6 +1044,57 @@ def legval3d(x, y, z, c): return pu._valnd(legval, c, x, y, z) +def legvalnd(pts, c): + r""" + Evaluate an N-D Legendre series at points. + + This function returns the values: + + .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * P_{i_1}(x_1) * P_{i_2}(x_2) \dots P_{i_n}(x_n) + + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. 
In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional Legendre series on points formed + with N-tuples of corresponding values from `pts`. + + See Also + -------- + legval, legval2d, legval3d + + """ + return pu._valnd(legval, c, *pts) + + def leggrid3d(x, y, z, c): """ Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. @@ -1372,7 +1424,7 @@ def legcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Legendre basis polynomial. This provides + symmetric when `c` is a Legendre basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. 
diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 53f8f7c210fa..aa0b9918d21c 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -21,6 +21,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -53,6 +54,7 @@ __all__ = [ "Legendre", "legval2d", "legval3d", + "legvalnd", "leggrid2d", "leggrid3d", "legvander2d", @@ -83,6 +85,7 @@ legint: Final[_FuncInteg] = ... legval: Final[_FuncVal] = ... legval2d: Final[_FuncVal2D] = ... legval3d: Final[_FuncVal3D] = ... +legvalnd: Final[_FuncValND] = ... leggrid2d: Final[_FuncVal2D] = ... leggrid3d: Final[_FuncVal3D] = ... legvander: Final[_FuncVander] = ... @@ -95,6 +98,6 @@ leggauss: Final[_FuncGauss] = ... legweight: Final[_FuncWeight] = ... class Legendre(ABCPolyBase[L["P"]]): - basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index e3823c89cd98..19cb12b9d02c 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -40,6 +40,7 @@ polyval polyval2d polyval3d + polyvalnd polygrid2d polygrid3d @@ -77,7 +78,7 @@ 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', - 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', + 'polyvalnd', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', 'polycompanion'] import numpy as np @@ -1011,6 +1012,59 @@ def polyval3d(x, y, z, c): """ return pu._valnd(polyval, c, x, y, z) +def _polyvalnd_dispatcher(pts, c): + return (*pts, c) + +@_array_function_dispatch(_polyvalnd_dispatcher) +def polyvalnd(pts, c): + r""" + Evaluate an N-D polynomial at points. + + This function returns the values: + + .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * x_1^{i_1} * x_2^{i_2} \dots x_n^{i_n} + + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. 
+ + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + polyval, polyval2d, polyval3d + + """ + return pu._valnd(polyval, c, *pts) def polygrid3d(x, y, z, c): """ @@ -1154,7 +1208,7 @@ def polyvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares @@ -1547,6 +1601,10 @@ def polyroots(c): m = polycompanion(c) r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index 86f288468a15..5c1d420c59ae 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -27,6 +27,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -57,6 +58,7 @@ __all__ = [ "Polynomial", "polyval2d", "polyval3d", + "polyvalnd", "polygrid2d", "polygrid3d", "polyvander2d", @@ -82,6 +84,7 @@ polyint: Final[_FuncInteg] = ... polyval: Final[_FuncVal] = ... polyval2d: Final[_FuncVal2D] = ... polyval3d: Final[_FuncVal3D] = ... +polyvalnd: Final[_FuncValND] = ... @overload def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... @@ -104,6 +107,6 @@ polycompanion: Final[_FuncCompanion] = ... polyroots: Final[_FuncRoots] = ... class Polynomial(ABCPolyBase[None]): - basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 79ca317c12b0..fbaaf7d22880 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,14 +1,5 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - Protocol, - SupportsIndex, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Final, Literal, Protocol, SupportsIndex, overload, type_check_only import numpy as np import numpy.typing as npt @@ -44,16 +35,13 @@ from ._polytypes import ( __all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] -_T = TypeVar("_T") -_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) - -_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray] -_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] -_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray] +type _AnyLineF = Callable[[float, float], _CoefArray] +type _AnyMulF = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +type _AnyVanderF = Callable[[np.ndarray, int], _CoefArray] @type_check_only -class _ValFunc(Protocol[_T]): - def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ... +class _ValFunc[T](Protocol): + def __call__(self, x: np.ndarray, c: T, /, *, tensor: bool = True) -> T: ... ### @@ -77,7 +65,7 @@ def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = Tru def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... # -def trimseq(seq: _SeqT) -> _SeqT: ... +def trimseq[SeqT: _CoefArray | Sequence[_CoefLike_co]](seq: SeqT) -> SeqT: ... # @overload @@ -219,10 +207,10 @@ def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... 
# keep in sync with `_gridnd` -def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... +def _valnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... # keep in sync with `_valnd` -def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... +def _gridnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... # keep in sync with `_polytypes._FuncBinOp` @overload diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 14777ac60375..ff2e5846852e 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -185,6 +185,27 @@ def test_chebval3d(self): res = cheb.chebval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_chebvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, cheb.chebvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = cheb.chebvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(cheb.chebvalnd((x1,), self.c1d), y1) + def test_chebgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 156dccf6ea88..93952745cf82 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -315,7 +315,11 @@ def test_truediv(Poly): p2 = p1 * 5 for stype in np.ScalarType: - if not issubclass(stype, Number) or issubclass(stype, bool): + if ( + not issubclass(stype, Number) + or issubclass(stype, bool) + or issubclass(stype, np.timedelta64) + ): continue s = stype(5) assert_poly_almost_equal(op.truediv(p2, s), p1) @@ -328,6 +332,14 @@ def test_truediv(Poly): s = stype(5, 0) assert_poly_almost_equal(op.truediv(p2, s), p1) 
assert_raises(TypeError, op.truediv, s, p2) + for stype in [np.timedelta64]: + s = stype(5, 'D') + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) for s in [(), [], {}, False, np.array([1])]: assert_raises(TypeError, op.truediv, p2, s) assert_raises(TypeError, op.truediv, s, p2) diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index a289ba0b50cc..aeb6d3649080 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -172,6 +172,27 @@ def test_hermval3d(self): res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_hermvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, herm.hermvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herm.hermvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(herm.hermvalnd((x1,), self.c1d), y1) + def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 233dfb28254a..6ff1d16e489f 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -172,6 +172,27 @@ def test_hermeval3d(self): res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_hermevalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, herme.hermevalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herme.hermevalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test 
shape + z = np.ones((2, 3)) + res = herme.hermevalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(herme.hermevalnd((x1,), self.c1d), y1) + def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 884f15a9fe8f..1c49b6696b8d 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -169,6 +169,27 @@ def test_lagval3d(self): res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_lagvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, lag.lagvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = lag.lagvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(lag.lagvalnd((x1,), self.c1d), y1) + def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 6c87f44ee707..654a6abdd536 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -173,6 +173,27 @@ def test_legval3d(self): res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_legvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, leg.legvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = leg.legvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(leg.legvalnd((x1,), self.c1d), y1) + def 
test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 4c924a758b06..413803a4e099 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -297,6 +297,28 @@ def test_polyval3d(self): res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_polyvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = poly.polyvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(poly.polyvalnd((x1,), self.c1d), y1) + def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index f7d0131c94a9..fe56f9e16bef 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -491,12 +491,13 @@ def test_numeric_object_coefficients(self): '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', - '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' - '(1.23457e-04) x**4', - '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' - '(1.234568e-04) x**4 + (1.234568e-05) x**5', - '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' - '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + ('1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4'), + ('1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5'), + ('1.2345679 + 
0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') +) class TestPrintOptions: """ @@ -550,7 +551,7 @@ def test_switch_to_exp(self): def test_non_finite(self): p = poly.Polynomial([nan, inf]) assert str(p) == 'nan + inf x' - assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' # noqa: RUF027 + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' with printoptions(nanstr='NAN', infstr='INF'): assert str(p) == 'NAN + INF x' assert p._repr_latex_() == \ diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index e9b9fb50ab8c..f949c5aef113 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -16,7 +16,7 @@ from .mtrand import ( f, gamma, geometric, - get_bit_generator, # noqa: F401 + get_bit_generator, get_state, gumbel, hypergeometric, @@ -44,7 +44,7 @@ from .mtrand import ( rayleigh, sample, seed, - set_bit_generator, # noqa: F401 + set_bit_generator, set_state, shuffle, standard_cauchy, diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd index 0de4456d778f..7b6ae56bfe12 100644 --- a/numpy/random/_common.pxd +++ b/numpy/random/_common.pxd @@ -26,12 +26,15 @@ cdef enum ConstraintType: LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG ctypedef ConstraintType constraint_type +ctypedef fused double_or_int64: + double + int64_t cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) cdef object prepare_cffi(bitgen_t *bitgen) cdef object prepare_ctypes(bitgen_t *bitgen) -cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 cdef extern from "include/aligned_malloc.h": diff --git a/numpy/random/_common.pyi b/numpy/random/_common.pyi index 
b667fd1c82eb..417387612014 100644 --- a/numpy/random/_common.pyi +++ b/numpy/random/_common.pyi @@ -1,11 +1,12 @@ +from _typeshed import Incomplete from collections.abc import Callable -from typing import Any, NamedTuple, TypeAlias +from typing import NamedTuple import numpy as np -__all__: list[str] = ["interface"] +__all__ = ["interface"] -_CDataVoidPointer: TypeAlias = Any +type _CDataVoidPointer = Incomplete # currently not expressible class interface(NamedTuple): state_address: int diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 1fc2f7a02e11..22e0b028e703 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -425,12 +425,13 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con return 0 -cdef int check_constraint(double val, object name, constraint_type cons) except -1: +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1: if cons == CONS_NON_NEGATIVE: - if not isnan(val) and signbit(val): + if ((double_or_int64 is double and not isnan(val) and signbit(val)) or + (double_or_int64 is int64_t and val < 0)): raise ValueError(f"{name} < 0") elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN: - if cons == CONS_POSITIVE_NOT_NAN and isnan(val): + if cons == CONS_POSITIVE_NOT_NAN and double_or_int64 is double and isnan(val): raise ValueError(f"{name} must not be NaN") elif val <= 0: raise ValueError(f"{name} <= 0") diff --git a/numpy/random/_examples/cffi/extending.py b/numpy/random/_examples/cffi/extending.py index ad4c9acbdceb..08cf50491cc8 100644 --- a/numpy/random/_examples/cffi/extending.py +++ b/numpy/random/_examples/cffi/extending.py @@ -32,7 +32,7 @@ interface = rng.bit_generator.cffi n = 100 -vals_cffi = ffi.new('double[%d]' % n) +vals_cffi = ffi.new(f'double[{n}]') lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) # reset the state diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 
1f7c342394e1..634aaf68912c 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,471 +1,534 @@ +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import bytes as py_bytes from collections.abc import Callable, MutableSequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, Self, overload import numpy as np -from numpy import dtype, float32, float64, int64 from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _BoolCodes, - _DoubleCodes, _DTypeLike, - _DTypeLikeBool, _Float32Codes, _Float64Codes, _FloatLike_co, - _Int8Codes, - _Int16Codes, - _Int32Codes, _Int64Codes, - _IntPCodes, + _NestedSequence, _ShapeLike, - _SingleCodes, - _SupportsDType, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntPCodes, ) -from numpy.random import BitGenerator, RandomState, SeedSequence -_IntegerT = TypeVar("_IntegerT", bound=np.integer) +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import RandomState -_DTypeLikeFloat32: TypeAlias = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes -) +type _ArrayF32 = NDArray[np.float32] +type _ArrayF64 = NDArray[np.float64] -_DTypeLikeFloat64: TypeAlias = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) +type _DTypeLikeI64 = _DTypeLike[np.int64] | _Int64Codes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes +# we use `str` to avoid type-checker performance issues because of the many `Literal` variants +type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str + +# Similar to `_ArrayLike{}_co`, but rejects scalars +type _NDArrayLikeInt = NDArray[np.generic[int]] | 
_NestedSequence[int] +type _NDArrayLikeFloat = NDArray[np.generic[float]] | _NestedSequence[float] + +type _MethodExp = Literal["zig", "inv"] + +### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... - def __reduce__(self) -> tuple[ - Callable[[BitGenerator], Generator], - tuple[BitGenerator], - None]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... + + # @property def bit_generator(self) -> BitGenerator: ... - def spawn(self, n_children: int) -> list[Generator]: ... - def bytes(self, length: int) -> bytes: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, - ) -> float: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_normal( # type: ignore[misc] - self, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ... - @overload - def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... 
- @overload - def standard_exponential( # type: ignore[misc] - self, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = "zig", - out: None = None, - ) -> float: ... - @overload - def standard_exponential( - self, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + def spawn(self, n_children: int) -> list[Self]: ... + def bytes(self, length: int) -> py_bytes: ... + + # continuous distributions + + # @overload - def standard_exponential( - self, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... + def standard_cauchy(self, size: None = None) -> float: ... @overload + def standard_cauchy(self, size: _ShapeLike) -> _ArrayF64: ... + + # + @overload # size=None (default); NOTE: dtype is ignored + def random(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ... + @overload # size=, dtype=f64 (default) + def random(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ... + @overload # size=, dtype=f32 + def random(self, size: _ShapeLike, dtype: _DTypeLikeF32, out: None = None) -> _ArrayF32: ... + @overload # out: f64 array (keyword) + def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f64 array + def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + @overload # out: f64 array (positional) + def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None, dtype: _DTypeLikeF64, out: ArrayT) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array + def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + + # + @overload # size=None (default); NOTE: dtype is ignored + def standard_normal(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ... 
+ @overload # size=, dtype: f64 (default) + def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ... + @overload # size=, dtype: f32 + def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None) -> _ArrayF32: ... + @overload # dtype: f64 (default), out: f64 array (keyword) + def standard_normal[ArrayT: _ArrayF64]( + self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f32 array + def standard_normal[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array + def standard_normal[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + + # + @overload # size=None (default); NOTE: dtype is ignored def standard_exponential( - self, - size: _ShapeLike | None = None, - *, - method: Literal["zig", "inv"] = "zig", - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload + self, size: None = None, dtype: _DTypeLikeFloat = ..., method: _MethodExp = "zig", out: None = None + ) -> float: ... + @overload # size=, dtype: f64 (default) def standard_exponential( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - method: Literal["zig", "inv"] = "zig", - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload + self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", out: None = None + ) -> _ArrayF64: ... + @overload # size=, dtype: f32 (default) def standard_exponential( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = "zig", - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... 
- @overload - def random( # type: ignore[misc] - self, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, + self, size: _ShapeLike, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: None = None + ) -> _ArrayF32: ... + @overload # dtype: f64 (default), out: f64 array (keyword) + def standard_exponential[ArrayT: _ArrayF64]( + self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", *, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f32 array + def standard_exponential[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array (keyword) + def standard_exponential[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None, dtype: _DTypeLikeF32, method: _MethodExp = "zig", *, out: ArrayT + ) -> ArrayT: ... + + # + @overload # 0d, size=None (default); NOTE: dtype is ignored + def standard_gamma( + self, shape: _FloatLike_co, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None ) -> float: ... - @overload - def random( - self, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def random( - self, - size: _ShapeLike | None = None, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def random( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def random( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def beta( - self, - a: _FloatLike_co, - b: _FloatLike_co, - size: None = None, - ) -> float: ... 
# type: ignore[misc] - @overload - def beta( - self, - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] - @overload - def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + @overload # >0d, dtype: f64 (default) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64 | Any: ... + @overload # >=0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32 | Any: ... + @overload # >=0d, size=, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >=0d, size=, dtype: f32 + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default), out: f64 array (keyword) + def standard_gamma[ArrayT: _ArrayF64]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (keyword), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... 
+ @overload # >=0d, dtype: f32 (positional), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... # - @overload + @overload # 0d + def power(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def power(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def power(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def power(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def pareto(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def pareto(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def pareto(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def pareto(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def weibull(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def weibull(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def weibull(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def weibull(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def standard_t(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def standard_t(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def standard_t(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def standard_t(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def chisquare(self, /, df: _FloatLike_co, size: None = None) -> float: ... 
+ @overload # size= + def chisquare(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def chisquare(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def chisquare(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def exponential(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def exponential(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def exponential(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def exponential(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def exponential(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def rayleigh(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def rayleigh(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def rayleigh(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def noncentral_chisquare(self, /, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # >0d, >=0d + def noncentral_chisquare(self, /, df: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def f(self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def vonmises(self, /, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def vonmises(self, /, mu: _NDArrayLikeFloat, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def wald(self, /, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def wald(self, /, mean: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def beta(self, /, a: _FloatLike_co, b: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def beta(self, /, a: _ArrayLikeFloat_co, b: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def beta(self, /, a: _NDArrayLikeFloat, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d (default) + def gamma(self, /, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (keyword) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gamma(self, /, shape: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... 
+ + # + @overload # 0d (default), 0d (default) + def uniform(self, /, low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # >=0d, >=0d, size= (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d, size= (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, *, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def uniform(self, /, low: _NDArrayLikeFloat, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def normal(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def normal(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... 
+ @overload # >=0d, >=0d + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def gumbel(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gumbel(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def logistic(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # >0d, >=0d + def logistic(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def logistic( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def laplace(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def laplace(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def laplace( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def lognormal(self, /, mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # size= (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, *, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def lognormal(self, /, mean: _NDArrayLikeFloat, sigma: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def lognormal( + self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def triangular(self, /, left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _NDArrayLikeFloat, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def triangular( + self, /, left: _NDArrayLikeFloat, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def noncentral_f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... 
+ @overload # >=0d, >=0d, >0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def noncentral_f( + self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + ### + # discrete + + # + @overload # 0d bool | int + def integers[AnyIntT: (bool, int)]( + self, low: int, high: int | None = None, size: None = None, *, dtype: type[AnyIntT], endpoint: bool = False + ) -> AnyIntT: ... + @overload # 0d integer dtype + def integers[ScalarT: np.integer | np.bool]( + self, low: int, high: int | None = None, size: None = None, *, dtype: _DTypeLike[ScalarT], endpoint: bool = False + ) -> ScalarT: ... + @overload # 0d int64 (default) def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - endpoint: bool = False, + self, low: int, high: int | None = None, size: None = None, dtype: _DTypeLikeI64 = ..., endpoint: bool = False ) -> np.int64: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[bool], - endpoint: bool = False, - ) -> bool: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[int], - endpoint: bool = False, - ) -> int: ... 
- @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[np.bool] | _BoolCodes, - endpoint: bool = False, - ) -> np.bool: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[_IntegerT], - endpoint: bool = False, - ) -> _IntegerT: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - endpoint: bool = False, - ) -> NDArray[np.int64]: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _DTypeLikeBool, - endpoint: bool = False, - ) -> NDArray[np.bool]: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _DTypeLike[_IntegerT], - endpoint: bool = False, - ) -> NDArray[_IntegerT]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> np.int8: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> NDArray[np.int8]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt8Codes, - endpoint: bool = False, - ) -> np.uint8: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt8Codes, - endpoint: bool = False, - ) -> NDArray[np.uint8]: ... 
- @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> np.int16: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> NDArray[np.int16]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> np.uint16: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> NDArray[np.uint16]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> np.int32: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> NDArray[np.int32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> np.uint32: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> NDArray[np.uint32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> np.uint64: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> NDArray[np.uint64]: ... 
- @overload + @overload # 0d unknown def integers( + self, low: int, high: int | None = None, size: None = None, dtype: DTypeLike | None = ..., endpoint: bool = False + ) -> Any: ... + @overload # integer dtype, size= + def integers[ScalarT: np.integer | np.bool]( self, - low: int, - high: int | None = None, - size: None = None, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, *, - dtype: _IntPCodes, + size: _ShapeLike, + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> np.intp: ... - @overload + ) -> NDArray[ScalarT]: ... + @overload # int64 (default), size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, *, - dtype: _IntPCodes, + size: _ShapeLike, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, - ) -> NDArray[np.intp]: ... - @overload + ) -> NDArray[np.int64]: ... + @overload # unknown, size= def integers( self, - low: int, - high: int | None = None, - size: None = None, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, *, - dtype: _UIntPCodes, + size: _ShapeLike, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> np.uintp: ... - @overload - def integers( + ) -> np.ndarray: ... + @overload # >=0d, integer dtype + def integers[ScalarT: np.integer | np.bool]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _UIntPCodes, + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> NDArray[np.uintp]: ... - @overload + ) -> NDArray[ScalarT] | Any: ... + @overload # >=0d, int64 (default) def integers( self, - low: int, - high: int | None = None, - size: None = None, - dtype: DTypeLike | None = ..., + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, - ) -> Any: ... - @overload + ) -> NDArray[np.int64] | Any: ... 
+ @overload # >=0d, unknown def integers( self, low: _ArrayLikeInt_co, @@ -473,33 +536,184 @@ class Generator: size: _ShapeLike | None = None, dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> NDArray[Any]: ... + ) -> np.ndarray | Any: ... + + # + @overload # 0d + def zipf(self, /, a: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def zipf(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def zipf(self, /, a: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def zipf(self, /, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def geometric(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def geometric(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def geometric(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def geometric(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def logseries(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def logseries(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def logseries(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def logseries(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d (default) + def poisson(self, /, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... + @overload # size= (keyword) + def poisson(self, /, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # size= (positional) + def poisson(self, /, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... 
+ @overload # >0d + def poisson(self, /, lam: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def poisson(self, /, lam: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def binomial(self, /, n: int, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def binomial(self, /, n: _NDArrayLikeInt, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def negative_binomial(self, /, n: _FloatLike_co, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def negative_binomial(self, /, n: _NDArrayLikeFloat, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def negative_binomial( + self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d, 0d + def hypergeometric(self, /, ngood: int, nbad: int, nsample: int, size: None = None) -> int: ... + @overload # size= + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: _ShapeLike + ) -> NDArray[np.int64]: ... 
+ @overload # >=0d, >=0d, >0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _NDArrayLikeInt, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _NDArrayLikeInt, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _NDArrayLikeInt, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + ### + # multivariate + + # + def dirichlet(self, /, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> _ArrayF64: ... + + # + def multivariate_normal( + self, + /, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, + *, + method: Literal["svd", "eigh", "cholesky"] = "svd", + ) -> _ArrayF64: ... + + # + def multinomial( + self, /, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[np.int64]: ... + + # + def multivariate_hypergeometric( + self, + /, + colors: _ArrayLikeInt_co, + nsample: int, + size: _ShapeLike | None = None, + method: Literal["marginals", "count"] = "marginals", + ) -> NDArray[np.int64]: ... + + ### + # resampling + + # axis must be 0 for MutableSequence + @overload + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... + @overload + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + + # + @overload + def permutation(self, /, x: int, axis: int = 0) -> NDArray[np.int64]: ... + @overload + def permutation(self, /, x: ArrayLike, axis: int = 0) -> np.ndarray: ... 
- # TODO: Use a TypeVar _T here to get away from Any output? - # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + # + @overload + def permuted[ArrayT: np.ndarray](self, /, x: ArrayT, *, axis: int | None = None, out: None = None) -> ArrayT: ... + @overload + def permuted(self, /, x: ArrayLike, *, axis: int | None = None, out: None = None) -> np.ndarray: ... @overload + def permuted[ArrayT: np.ndarray](self, /, x: ArrayLike, *, axis: int | None = None, out: ArrayT) -> ArrayT: ... + + # + @overload # >=0d int, size=None (default) def choice( self, - a: int, + /, + a: int | _NestedSequence[int], size: None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, ) -> int: ... - @overload - def choice( + @overload # >=0d known, size=None (default) + def choice[ScalarT: np.generic]( self, - a: int, - size: _ShapeLike | None = None, + /, + a: _ArrayLike[ScalarT], + size: None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, - ) -> NDArray[int64]: ... - @overload + ) -> ScalarT: ... + @overload # >=0d unknown, size=None (default) def choice( self, + /, a: ArrayLike, size: None = None, replace: bool = True, @@ -507,356 +721,38 @@ class Generator: axis: int = 0, shuffle: bool = True, ) -> Any: ... - @overload + @overload # >=0d int, size= def choice( self, - a: ArrayLike, - size: _ShapeLike | None = None, + /, + a: int | _NestedSequence[int], + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, - ) -> NDArray[Any]: ... - @overload - def uniform( - self, - low: _FloatLike_co = 0.0, - high: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def uniform( - self, - low: _ArrayLikeFloat_co = 0.0, - high: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... 
- @overload - def normal( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def normal( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( # type: ignore[misc] - self, - shape: _FloatLike_co, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, - ) -> float: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] - @overload - def gamma( - self, - shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] - @overload - def f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def noncentral_f( - self, - dfnum: _FloatLike_co, - dfden: _FloatLike_co, - nonc: _FloatLike_co, - size: None = None, - ) -> float: ... 
# type: ignore[misc] - @overload - def noncentral_f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] - @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] - @overload - def noncentral_chisquare( - self, - df: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = None - ) -> NDArray[float64]: ... - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] - @overload - def vonmises( - self, - mu: _ArrayLikeFloat_co, - kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] - @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] - @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def power(self, a: _FloatLike_co, size: None = None) -> float: ... 
# type: ignore[misc] - @overload - def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] - @overload - def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... - @overload - def laplace( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def laplace( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def gumbel( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def gumbel( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def logistic( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def logistic( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def lognormal( - self, - mean: _FloatLike_co = 0.0, - sigma: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def lognormal( - self, - mean: _ArrayLikeFloat_co = 0.0, - sigma: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] - @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None - ) -> float: ... 
# type: ignore[misc] - @overload - def wald( - self, - mean: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def triangular( - self, - left: _FloatLike_co, - mode: _FloatLike_co, - right: _FloatLike_co, - size: None = None, - ) -> float: ... # type: ignore[misc] - @overload - def triangular( - self, - left: _ArrayLikeFloat_co, - mode: _ArrayLikeFloat_co, - right: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] - @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = None - ) -> int: ... # type: ignore[misc] - @overload - def negative_binomial( - self, - n: _ArrayLikeFloat_co, - p: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] - @overload - def poisson( - self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] - @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] - @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... 
# type: ignore[misc] - @overload - def hypergeometric( - self, - ngood: _ArrayLikeInt_co, - nbad: _ArrayLikeInt_co, - nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = None, - ) -> NDArray[int64]: ... - @overload - def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] - @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - def multivariate_normal( + ) -> NDArray[np.int64]: ... + @overload # >=0d known, size= + def choice[ScalarT: np.generic]( self, - mean: _ArrayLikeFloat_co, - cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - check_valid: Literal["warn", "raise", "ignore"] = "warn", - tol: float = 1e-8, - *, - method: Literal["svd", "eigh", "cholesky"] = "svd", - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - def multivariate_hypergeometric( + /, + a: _ArrayLike[ScalarT], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[ScalarT]: ... + @overload # >=0d unknown, size= + def choice( self, - colors: _ArrayLikeInt_co, - nsample: int, - size: _ShapeLike | None = None, - method: Literal["marginals", "count"] = "marginals", - ) -> NDArray[int64]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - def permuted( - self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None - ) -> NDArray[Any]: ... - - # axis must be 0 for MutableSequence - @overload - def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... - @overload - def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... 
+ /, + a: ArrayLike, + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> np.ndarray: ... -def default_rng( - seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None -) -> Generator: ... +def default_rng(seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None) -> Generator: ... diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ed9b6a3d12a8..6623d347a4cf 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -294,6 +294,8 @@ cdef class Generator: >>> nested_spawn = child_rng1.spawn(20) """ + if n_children < 0: + raise ValueError("n_children must be non-negative") return [type(self)(g) for g in self._bit_generator.spawn(n_children)] def random(self, size=None, dtype=np.float64, out=None): @@ -539,7 +541,7 @@ cdef class Generator: Byteorder must be native. The default value is np.float64. method : str, optional Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method. - 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang. + 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang [1]_. out : ndarray, optional Alternative output array in which to place the result. If size is not None, it must have the same shape as the provided size and must match the type of @@ -550,6 +552,12 @@ cdef class Generator: out : float or ndarray Drawn samples. + References + ---------- + .. [1] Marsaglia, G. and Tsang, W. W. (2000). The Ziggurat method for + generating random variables. Journal of Statistical Software, 5, 1-7. + https://doi.org/10.18637/jss.v005.i08 + Examples -------- Output a 3x8000 array: @@ -793,7 +801,7 @@ cdef class Generator: than the optimized sampler even if each element of ``p`` is 1 / len(a). ``p`` must sum to 1 when cast to ``float64``. To ensure this, you may wish - to normalize using ``p = p / np.sum(p, dtype=float)``. 
+ to normalize using ``p = p / np.sum(p, dtype=np.float64)``. When passing ``a`` as an integer type and ``size`` is not specified, the return type is a native Python ``int``. @@ -949,7 +957,7 @@ cdef class Generator: cutoff = 20 if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)): # Tail shuffle size elements - idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64) + idx = np.arange(0, pop_size_i, dtype=np.int64) idx_data = (idx).data with self.lock, nogil: _shuffle_int(&self._bitgen, pop_size_i, @@ -982,7 +990,7 @@ cdef class Generator: idx_data[j - pop_size_i + size_i] = j if shuffle: _shuffle_int(&self._bitgen, size_i, 1, idx_data) - idx.shape = shape + idx = idx.reshape(shape) if is_scalar and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array @@ -3946,8 +3954,7 @@ cdef class Generator: _factor = u * np.sqrt(s) x = mean + x @ _factor.T - x.shape = tuple(final_shape) - return x + return x.reshape(tuple(final_shape)) def multinomial(self, object n, object pvals, size=None): """ diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 03373a6dd6ea..074a8eec5447 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -22,6 +22,6 @@ class MT19937(BitGenerator): def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... def jumped(self, jumps: int = 1) -> MT19937: ... @property # type: ignore[override] - def state(self) -> _MT19937State: ... + def state(self) -> _MT19937State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _MT19937State) -> None: ... diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index a9e81f7f181b..aede210d4026 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -21,21 +21,16 @@ class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64: ... @property # type: ignore[override] - def state( - self, - ) -> _PCG64State: ... 
+ def state(self) -> _PCG64State: ... # pyrefly: ignore[bad-override] @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64: ... class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64DXSM: ... @property # type: ignore[override] - def state(self) -> _PCG64State: ... + def state(self) -> _PCG64State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 30a00a11aa1d..597ef5979ec6 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -256,7 +256,7 @@ cdef class PCG64(BitGenerator): Notes ----- - Advancing a RNG updates the underlying RNG state as-if a given + Advancing an RNG updates the underlying RNG state as-if a given number of calls to the underlying RNG have been made. In general there is not a one-to-one relationship between the number output random values from a particular distribution and the number of @@ -490,7 +490,7 @@ cdef class PCG64DXSM(BitGenerator): Notes ----- - Advancing a RNG updates the underlying RNG state as-if a given + Advancing an RNG updates the underlying RNG state as-if a given number of calls to the underlying RNG have been made. In general there is not a one-to-one relationship between the number output random values from a particular distribution and the number of diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 3089f11ea629..ea9880ea10e2 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -29,7 +29,7 @@ class Philox(BitGenerator): key: _ArrayLikeInt_co | None = ..., ) -> None: ... @property # type: ignore[override] - def state(self) -> _PhiloxState: ... + def state(self) -> _PhiloxState: ... 
# pyrefly: ignore[bad-override] @state.setter def state(self, value: _PhiloxState) -> None: ... def jumped(self, jumps: int = 1) -> Philox: ... diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index da47ad21e2de..422810b9a12e 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -307,7 +307,7 @@ cdef class Philox(BitGenerator): Notes ----- - Advancing a RNG updates the underlying RNG state as-if a given + Advancing an RNG updates the underlying RNG state as-if a given number of calls to the underlying RNG have been made. In general there is not a one-to-one relationship between the number output random values from a particular distribution and the number of diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi index b8b1b7bcf63b..b0aa143801ba 100644 --- a/numpy/random/_pickle.pyi +++ b/numpy/random/_pickle.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only +from typing import Final, Literal, TypedDict, overload, type_check_only from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 @@ -9,8 +9,6 @@ from numpy.random._sfc64 import SFC64 from numpy.random.bit_generator import BitGenerator from numpy.random.mtrand import RandomState -_T = TypeVar("_T", bound=BitGenerator) - @type_check_only class _BitGenerators(TypedDict): MT19937: type[MT19937] @@ -19,6 +17,8 @@ class _BitGenerators(TypedDict): Philox: type[Philox] SFC64: type[SFC64] +### + BitGenerators: Final[_BitGenerators] = ... @overload @@ -32,7 +32,7 @@ def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... @overload def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... @overload -def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __bit_generator_ctor[BitGeneratorT: BitGenerator](bit_generator: type[BitGeneratorT]) -> BitGeneratorT: ... 
def __generator_ctor( bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index f5f3fed9c251..5bddaf2b7676 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -20,6 +20,6 @@ class _SFC64State(TypedDict): class SFC64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... @property # type: ignore[override] - def state(self) -> _SFC64State: ... + def state(self) -> _SFC64State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _SFC64State) -> None: ... diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index ee4499dee1f3..51ee8188e65f 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -8,7 +8,6 @@ from typing import ( Literal, NamedTuple, Self, - TypeAlias, TypedDict, overload, type_check_only, @@ -29,7 +28,7 @@ __all__ = ["BitGenerator", "SeedSequence"] ### -_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes +type _DTypeLikeUint_ = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes @type_check_only class _SeedSeqState(TypedDict): @@ -50,7 +49,7 @@ class _Interface(NamedTuple): @type_check_only class _CythonMixin: def __setstate_cython__(self, pyx_state: object, /) -> None: ... - def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + def __reduce_cython__(self) -> Any: ... 
@type_check_only class _GenerateStateMixin(_CythonMixin): diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index 0bb9552a86ce..676f95e5ad70 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -38,7 +38,7 @@ from itertools import cycle import re from secrets import randbits -from threading import Lock +from threading import RLock from cpython.pycapsule cimport PyCapsule_New @@ -241,6 +241,8 @@ cdef class SeedlessSeedSequence: raise NotImplementedError('seedless SeedSequences cannot generate state') def spawn(self, n_children): + if n_children < 0: + raise ValueError("n_children must be non-negative") return [self] * n_children @@ -476,6 +478,9 @@ cdef class SeedSequence: """ cdef uint32_t i + if n_children < 0: + raise ValueError("n_children must be non-negative") + seqs = [] for i in range(self.n_children_spawned, self.n_children_spawned + n_children): @@ -522,7 +527,7 @@ cdef class BitGenerator: """ def __init__(self, seed=None): - self.lock = Lock() + self.lock = RLock() self._bitgen.state = 0 if type(self) is BitGenerator: raise NotImplementedError('BitGenerator is a base class and cannot be instantized') @@ -626,6 +631,8 @@ cdef class BitGenerator: Equivalent method on the generator and seed sequence. """ + if n_children < 0: + raise ValueError("n_children must be non-negative") if not isinstance(self._seed_seq, ISpawnableSeedSequence): raise TypeError( "The underlying SeedSequence does not implement spawning.") @@ -709,3 +716,8 @@ cdef class BitGenerator: if self._cffi is None: self._cffi = prepare_cffi(&self._bitgen) return self._cffi + +# NOTE: This has no implementation and should not be used. It purely exists for +# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. 
+cdef class SeedlessSequence: + pass diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index c20d35193d45..066a56545f23 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,44 +1,28 @@ -import builtins +from builtins import bytes as py_bytes from collections.abc import Callable from typing import Any, Literal, overload import numpy as np -from numpy import ( - dtype, - float64, - int8, - int16, - int32, - int64, - int_, - long, - uint, - uint8, - uint16, - uint32, - uint64, - ulong, -) from numpy._typing import ( ArrayLike, NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _BoolCodes, + _DTypeLike, _DTypeLikeBool, + _DTypeLikeInt, _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, - _LongCodes, + _IntPCodes, _ShapeLike, - _SupportsDType, _UInt8Codes, _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, - _ULongCodes, + _UIntPCodes, ) from numpy.random.bit_generator import BitGenerator @@ -100,277 +84,297 @@ __all__ = [ class RandomState: _bit_generator: BitGenerator - def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... + + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = None) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... + + # def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... + + # @overload def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload - def get_state( - self, legacy: Literal[True] = True - ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... 
- def set_state( - self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] - ) -> None: ... + def get_state(self, legacy: Literal[True] = True) -> dict[str, Any] | tuple[str, NDArray[np.uint32], int, int, float]: ... + + # + def set_state(self, state: dict[str, Any] | tuple[str, NDArray[np.uint32], int, int, float]) -> None: ... + + # @overload - def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... @overload - def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + def random_sample(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def random(self, size: None = None) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... @overload - def random(self, size: _ShapeLike) -> NDArray[float64]: ... + def random(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... @overload - def beta( - self, - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def exponential( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] + def exponential(self, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... 
@overload - def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc] + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - # Generates long values, but stores it in a 64bit int: - def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + def standard_exponential(self, size: None = None) -> float: ... @overload - def randint( # type: ignore[misc] - self, - low: int, - high: int | None = None, - size: None = None, - ) -> int: ... + def standard_exponential(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def randint( # type: ignore[misc] - self, - low: int, - high: int | None = None, - size: None = None, - dtype: type[bool] = ..., - ) -> bool: ... + def tomaxint(self, size: None = None) -> int: ... + @overload # Generates long values, but stores it in a 64bit int: + def tomaxint(self, size: _ShapeLike) -> NDArray[np.int64]: ... + + # @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: type[np.bool] = ..., - ) -> np.bool: ... + *, + dtype: type[bool], + ) -> bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: type[int] = ..., + dtype: type[int] = int, ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 - ) -> uint8: ... + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + ) -> np.bool: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 - ) -> uint16: ... + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, + ) -> np.int8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 - ) -> uint32: ... + *, + dtype: _DTypeLike[np.int16] | _Int16Codes, + ) -> np.int16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 - ) -> uint: ... + *, + dtype: _DTypeLike[np.int32] | _Int32Codes, + ) -> np.int32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 - ) -> ulong: ... + *, + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> np.int64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 - ) -> uint64: ... + *, + dtype: _DTypeLike[np.int_] | _IntPCodes, + ) -> np.int_: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 - ) -> int8: ... + *, + dtype: _DTypeLike[np.uint8] | _UInt8Codes, + ) -> np.uint8: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 - ) -> int16: ... + *, + dtype: _DTypeLike[np.uint16] | _UInt16Codes, + ) -> np.uint16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 - ) -> int32: ... + *, + dtype: _DTypeLike[np.uint32] | _UInt32Codes, + ) -> np.uint32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 - ) -> int_: ... + *, + dtype: _DTypeLike[np.uint64] | _UInt64Codes, + ) -> np.uint64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 - ) -> long: ... + *, + dtype: _DTypeLike[np.uintp] | _UIntPCodes, + ) -> np.uint: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 - ) -> int64: ... - @overload - def randint( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + *, + dtype: _DTypeLikeInt, + ) -> np.integer | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLikeBool = ..., - ) -> NDArray[np.bool]: ... 
+ *, + dtype: _DTypeLikeBool, + ) -> NDArray[np.bool] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 - ) -> NDArray[int8]: ... + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, + ) -> NDArray[np.int8] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 - ) -> NDArray[int16]: ... + *, + dtype: _DTypeLike[np.int16] | _Int16Codes, + ) -> NDArray[np.int16] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 - ) -> NDArray[int32]: ... + *, + dtype: _DTypeLike[np.int32] | _Int32Codes, + ) -> NDArray[np.int32] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 - ) -> NDArray[int64]: ... + *, + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> NDArray[np.int64] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 - ) -> NDArray[uint8]: ... + dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, + ) -> NDArray[np.int_] | Any: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 - ) -> NDArray[uint16]: ... + *, + dtype: _DTypeLike[np.uint8] | _UInt8Codes, + ) -> NDArray[np.uint8] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 - ) -> NDArray[uint32]: ... + *, + dtype: _DTypeLike[np.uint16] | _UInt16Codes, + ) -> NDArray[np.uint16] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 - ) -> NDArray[uint64]: ... + *, + dtype: _DTypeLike[np.uint32] | _UInt32Codes, + ) -> NDArray[np.uint32] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 - ) -> NDArray[long]: ... + *, + dtype: _DTypeLike[np.uint64] | _UInt64Codes, + ) -> NDArray[np.uint64] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 - ) -> NDArray[ulong]: ... - def bytes(self, length: int) -> builtins.bytes: ... + *, + dtype: _DTypeLikeInt, + ) -> NDArray[np.integer] | Any: ... + + # + def bytes(self, length: int) -> py_bytes: ... 
+ + # @overload def choice( self, @@ -383,10 +387,10 @@ class RandomState: def choice( self, a: int, - size: _ShapeLike | None = None, + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, - ) -> NDArray[long]: ... + ) -> NDArray[np.long]: ... @overload def choice( self, @@ -399,285 +403,517 @@ class RandomState: def choice( self, a: ArrayLike, - size: _ShapeLike | None = None, + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... + + # + @overload + def uniform( + self, + low: float = 0.0, + high: float = 1.0, + size: None = None, + ) -> float: ... @overload def uniform( - self, low: float = 0.0, high: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + self, + low: _ArrayLikeFloat_co, + high: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def uniform( self, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def rand(self) -> float: ... + def uniform( + self, + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload - def rand(self, *args: int) -> NDArray[float64]: ... + def rand(self, /) -> float: ... + @overload + def rand(self, arg0: int, /, *args: int) -> NDArray[np.float64]: ... + + # @overload - def randn(self) -> float: ... + def randn(self, /) -> float: ... @overload - def randn(self, *args: int) -> NDArray[float64]: ... + def randn(self, arg0: int, /, *args: int) -> NDArray[np.float64]: ... + + # @overload def random_integers( - self, low: int, high: int | None = None, size: None = None - ) -> int: ... # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + ) -> int: ... 
+ @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None, + size: _ShapeLike, + ) -> NDArray[np.long]: ... @overload def random_integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.long]: ... @overload - def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: None = None, + ) -> NDArray[np.long] | Any: ... + + # + @overload + def standard_normal(self, size: None = None) -> float: ... + @overload + def standard_normal(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def standard_normal( # type: ignore[misc] - self, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def normal( + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... @overload def normal( - self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def normal( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def standard_gamma( # type: ignore[misc] + def normal( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def standard_gamma(self, shape: float, size: None = None) -> float: ... + @overload + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... 
+ + # + @overload + def gamma( self, shape: float, + scale: float = 1.0, size: None = None, ) -> float: ... @overload - def standard_gamma( + def gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def gamma( self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload - def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def f( + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def noncentral_f( self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + dfnum: float, + dfden: float, + nonc: float, + size: None = None, + ) -> float: ... @overload def noncentral_f( - self, dfnum: float, dfden: float, nonc: float, size: None = None - ) -> float: ... # type: ignore[misc] + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def noncentral_f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... 
+ size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload - def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + def chisquare(self, df: float, size: None = None) -> float: ... @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def chisquare(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload def noncentral_chisquare( - self, df: float, nonc: float, size: None = None - ) -> float: ... # type: ignore[misc] + self, + df: float, + nonc: float, + size: None = None, + ) -> float: ... @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = None - ) -> NDArray[float64]: ... + def standard_t(self, df: float, size: None = None) -> float: ... @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] + def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def vonmises( - self, - mu: _ArrayLikeFloat_co, - kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... 
+ def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... + @overload + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def pareto(self, a: float, size: None = None) -> float: ... @overload - def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def pareto(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def weibull(self, a: float, size: None = None) -> float: ... @overload - def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def weibull(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def power(self, a: float, size: None = None) -> float: ... @overload - def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def power(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... 
@overload - def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload def laplace( - self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def laplace( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def gumbel( + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... @overload def gumbel( - self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def gumbel( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload def logistic( - self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... 
+ @overload + def logistic( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def logistic( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload def lognormal( - self, mean: float = 0.0, sigma: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + self, + mean: float = 0.0, + sigma: float = 1.0, + size: None = None, + ) -> float: ... + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co, + sigma: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def lognormal( self, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload - def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... + @overload + def rayleigh(self, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... 
+ def wald(self, mean: float, scale: float, size: None = None) -> float: ... @overload - def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def wald( + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def triangular( self, - mean: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + left: float, + mode: float, + right: float, + size: None = None, + ) -> float: ... @overload def triangular( - self, left: float, mode: float, right: float, size: None = None - ) -> float: ... # type: ignore[misc] + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def triangular( self, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload - def binomial( - self, n: int, p: float, size: None = None - ) -> int: ... # type: ignore[misc] + def binomial(self, n: int, p: float, size: None = None) -> int: ... @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def negative_binomial( - self, n: float, p: float, size: None = None - ) -> int: ... # type: ignore[misc] + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... 
+ + # @overload - def negative_binomial( - self, - n: _ArrayLikeFloat_co, - p: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def negative_binomial(self, n: float, p: float, size: None = None) -> int: ... + @overload + def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # @overload - def poisson( - self, lam: float = 1.0, size: None = None - ) -> int: ... # type: ignore[misc] + def poisson(self, lam: float = 1.0, size: None = None) -> int: ... @overload - def poisson( - self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def poisson(self, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.long] | Any: ... + + # + @overload + def zipf(self, a: float, size: None = None) -> int: ... @overload - def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def zipf(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def geometric(self, p: float, size: None = None) -> int: ... + @overload + def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... 
+ @overload + def geometric(self, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... # type: ignore[misc] + self, + ngood: int, + nbad: int, + nsample: int, + size: None = None, + ) -> int: ... @overload def hypergeometric( self, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + size: _ShapeLike, + ) -> NDArray[np.long]: ... @overload - def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: None = None, + ) -> NDArray[np.long] | Any: ... + + # + @overload + def logseries(self, p: float, size: None = None) -> int: ... @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def logseries(self, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # def multivariate_normal( self, mean: _ArrayLikeFloat_co, @@ -685,18 +921,20 @@ class RandomState: size: _ShapeLike | None = None, check_valid: Literal["warn", "raise", "ignore"] = "warn", tol: float = 1e-8, - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[long]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... + + # + def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... + + # + def dirichlet(self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... 
+ + # def shuffle(self, x: ArrayLike) -> None: ... + + # @overload - def permutation(self, x: int) -> NDArray[long]: ... + def permutation(self, x: int) -> NDArray[np.long]: ... @overload def permutation(self, x: ArrayLike) -> NDArray[Any]: ... @@ -755,5 +993,4 @@ sample = _rand.random_sample ranf = _rand.random_sample def set_bit_generator(bitgen: BitGenerator) -> None: ... - def get_bit_generator() -> BitGenerator: ... diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 8e7f437641ca..987d3edf159f 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -222,12 +222,13 @@ cdef class RandomState: "be instantized.") self._bitgen = ( PyCapsule_GetPointer(capsule, name))[0] self._aug_state.bit_generator = &self._bitgen - self._reset_gauss() self.lock = bit_generator.lock + self._reset_gauss() cdef _reset_gauss(self): - self._aug_state.has_gauss = 0 - self._aug_state.gauss = 0.0 + with self.lock: + self._aug_state.has_gauss = 0 + self._aug_state.gauss = 0.0 def seed(self, seed=None): """ @@ -251,8 +252,9 @@ cdef class RandomState: """ if not isinstance(self._bit_generator, _MT19937): raise TypeError('can only re-seed a MT19937 BitGenerator') - self._bit_generator._legacy_seeding(seed) - self._reset_gauss() + with self.lock: + self._bit_generator._legacy_seeding(seed) + self._reset_gauss() def get_state(self, legacy=True): """ @@ -300,8 +302,9 @@ cdef class RandomState: 'MT19937 BitGenerator. 
To silence this warning, ' 'set `legacy` to False.', RuntimeWarning) legacy = False - st['has_gauss'] = self._aug_state.has_gauss - st['gauss'] = self._aug_state.gauss + with self.lock: + st['has_gauss'] = self._aug_state.has_gauss + st['gauss'] = self._aug_state.gauss if legacy and not isinstance(self._bit_generator, _MT19937): raise ValueError( "legacy can only be True when the underlying bitgenerator is " @@ -381,9 +384,13 @@ cdef class RandomState: st['has_gauss'] = state[3] st['gauss'] = state[4] - self._aug_state.gauss = st.get('gauss', 0.0) - self._aug_state.has_gauss = st.get('has_gauss', 0) - self._bit_generator.state = st + cdef double gauss = st.get('gauss', 0.0) + cdef int has_gauss = st.get('has_gauss', 0) + + with self.lock: + self._aug_state.gauss = gauss + self._aug_state.has_gauss = has_gauss + self._bit_generator.state = st def random_sample(self, size=None): """ @@ -1042,7 +1049,7 @@ cdef class RandomState: idx = found else: idx = self.permutation(pop_size)[:size] - idx.shape = shape + idx = idx.reshape(shape) if is_scalar and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array @@ -2414,11 +2421,14 @@ cdef class RandomState: Notes ----- - The probability density for the Pareto distribution is + The probability density for the Pareto II distribution is + + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 - .. math:: p(x) = \\frac{am^a}{x^{a+1}} + where :math:`a > 0` is the shape. - where :math:`a` is the shape and :math:`m` the scale. + The Pareto II distribution is a shifted and scaled version of the + Pareto I distribution, which can be found in `scipy.stats.pareto`. 
The Pareto distribution, named after the Italian economist Vilfredo Pareto, is a power law probability distribution @@ -4261,8 +4271,7 @@ cdef class RandomState: x = np.dot(x, np.sqrt(s)[:, None] * v) x += mean - x.shape = tuple(final_shape) - return x + return x.reshape(tuple(final_shape)) def multinomial(self, long n, object pvals, size=None): """ diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 79cacb2df4a4..39c2b1916c6c 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -741,15 +741,21 @@ RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n, f2 = f1 * f1; z2 = z * z; w2 = w * w; + /* + * Note that the third and fourth error terms are subtracted. + * This is a correction from the original 1988 paper + * (Kachitvichyanukul & Schmeiser) which erroneously adds + * all four terms + */ if (A > (xm * log(f1 / x1) + (n - m + 0.5) * log(z / w) + (y - m) * log(w * r / (x1 * q)) + - (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 / - 166320. + - (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z / - 166320. + - (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 / + (13860. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 / 166320. + - (13680. - (462. - (132. - (99. - 140. / w2) / w2) / w2) / w2) / w / + (13860. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z / + 166320. - + (13860. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 / + 166320. - + (13860. - (462. - (132. - (99. - 140. 
/ w2) / w2) / w2) / w2) / w / 166320.)) { goto Step10; } diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index e84bd19fdaee..f6fdf53bf6f2 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -267,6 +267,156 @@ static RAND_INT_TYPE legacy_random_binomial_inversion( return X; } +/* + * BTPE implementation preserved for compatibility. The last two error terms of + * the Stirling approximation are incorrectly added + */ +static RAND_INT_TYPE legacy_random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial) { + double r, q, fm, p1, xm, xl, xr, c, laml, lamr, p2, p3, p4; + double a, u, v, s, F, rho, t, A, nrq, x1, x2, f1, f2, z, z2, w, w2, x; + RAND_INT_TYPE m, y, k, i; + + if (!(binomial->has_binomial) || (binomial->nsave != n) || + (binomial->psave != p)) { + /* initialize */ + binomial->nsave = n; + binomial->psave = p; + binomial->has_binomial = 1; + binomial->r = r = MIN(p, 1.0 - p); + binomial->q = q = 1.0 - r; + binomial->fm = fm = n * r + r; + binomial->m = m = (RAND_INT_TYPE)floor(binomial->fm); + binomial->p1 = p1 = floor(2.195 * sqrt(n * r * q) - 4.6 * q) + 0.5; + binomial->xm = xm = m + 0.5; + binomial->xl = xl = xm - p1; + binomial->xr = xr = xm + p1; + binomial->c = c = 0.134 + 20.5 / (15.3 + m); + a = (fm - xl) / (fm - xl * r); + binomial->laml = laml = a * (1.0 + a / 2.0); + a = (xr - fm) / (xr * q); + binomial->lamr = lamr = a * (1.0 + a / 2.0); + binomial->p2 = p2 = p1 * (1.0 + 2.0 * c); + binomial->p3 = p3 = p2 + c / laml; + binomial->p4 = p4 = p3 + c / lamr; + } else { + r = binomial->r; + q = binomial->q; + fm = binomial->fm; + m = binomial->m; + p1 = binomial->p1; + xm = binomial->xm; + xl = binomial->xl; + xr = binomial->xr; + c = binomial->c; + laml = binomial->laml; + lamr = binomial->lamr; + p2 = binomial->p2; + p3 = binomial->p3; + p4 = binomial->p4; + } + +/* sigh ... 
*/ +Step10: + nrq = n * r * q; + u = next_double(bitgen_state) * p4; + v = next_double(bitgen_state); + if (u > p1) + goto Step20; + y = (RAND_INT_TYPE)floor(xm - p1 * v + u); + goto Step60; + +Step20: + if (u > p2) + goto Step30; + x = xl + (u - p1) / c; + v = v * c + 1.0 - fabs(m - x + 0.5) / p1; + if (v > 1.0) + goto Step10; + y = (RAND_INT_TYPE)floor(x); + goto Step50; + +Step30: + if (u > p3) + goto Step40; + y = (RAND_INT_TYPE)floor(xl + log(v) / laml); + /* Reject if v==0.0 since previous cast is undefined */ + if ((y < 0) || (v == 0.0)) + goto Step10; + v = v * (u - p2) * laml; + goto Step50; + +Step40: + y = (RAND_INT_TYPE)floor(xr - log(v) / lamr); + /* Reject if v==0.0 since previous cast is undefined */ + if ((y > n) || (v == 0.0)) + goto Step10; + v = v * (u - p3) * lamr; + +Step50: + k = llabs(y - m); + if ((k > 20) && (k < ((nrq) / 2.0 - 1))) + goto Step52; + + s = r / q; + a = s * (n + 1); + F = 1.0; + if (m < y) { + for (i = m + 1; i <= y; i++) { + F *= (a / i - s); + } + } else if (m > y) { + for (i = y + 1; i <= m; i++) { + F /= (a / i - s); + } + } + if (v > F) + goto Step10; + goto Step60; + +Step52: + rho = + (k / (nrq)) * ((k * (k / 3.0 + 0.625) + 0.16666666666666666) / nrq + 0.5); + t = -k * k / (2 * nrq); + /* log(0.0) ok here */ + A = log(v); + if (A < (t - rho)) + goto Step60; + if (A > (t + rho)) + goto Step10; + + x1 = (double)y + 1; + f1 = (double)m + 1; + z = (double)n + 1 - (double)m; + w = (double)n - (double)y + 1; + x2 = x1 * x1; + f2 = f1 * f1; + z2 = z * z; + w2 = w * w; + /* The last two terms are subtracted in the corrected version */ + if (A > (xm * log(f1 / x1) + (n - m + 0.5) * log(z / w) + + (y - m) * log(w * r / (x1 * q)) + + (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 / + 166320. + + (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z / + 166320. + + (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 / + 166320. + + (13680. - (462. - (132. - (99. - 140. 
/ w2) / w2) / w2) / w2) / w / + 166320.)) { + goto Step10; + } + +Step60: + if (p > 0.5) { + y = n - y; + } + + return y; +} + static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, double p, RAND_INT_TYPE n, @@ -277,14 +427,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, if (p * n <= 30.0) { return legacy_random_binomial_inversion(bitgen_state, n, p, binomial); } else { - return random_binomial_btpe(bitgen_state, n, p, binomial); + return legacy_random_binomial_btpe(bitgen_state, n, p, binomial); } } else { q = 1.0 - p; if (q * n <= 30.0) { return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial); } else { - return n - random_binomial_btpe(bitgen_state, n, q, binomial); + return n - legacy_random_binomial_btpe(bitgen_state, n, q, binomial); } } } @@ -469,7 +619,26 @@ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial) { - random_multinomial(bitgen_state, n, mnix, pix, d, binomial); + /* + * Mirrors random_multinomial but dispatches to legacy_random_binomial, + * since bug fixes to random_binomial would otherwise change the + * RandomState stream. 
+ */ + double remaining_p = 1.0; + npy_intp j; + RAND_INT_TYPE dn = n; + for (j = 0; j < (d - 1); j++) { + mnix[j] = (RAND_INT_TYPE)legacy_random_binomial( + bitgen_state, pix[j] / remaining_p, dn, binomial); + dn = dn - mnix[j]; + if (dn <= 0) { + break; + } + remaining_p -= pix[j]; + } + if (dn > 0) { + mnix[d - 1] = dn; + } } double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 9916f8ad3440..d85fed78617f 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -183,6 +183,31 @@ def test_generator_spawning(): assert new_rngs[0].uniform() != new_rngs[1].uniform() +def test_spawn_negative_n_children(): + """Test that spawn raises ValueError for negative n_children.""" + from numpy.random.bit_generator import SeedlessSeedSequence + + rng = np.random.default_rng(42) + seq = rng.bit_generator.seed_seq + + # Test SeedSequence.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + seq.spawn(-1) + + # Test SeedlessSeedSequence.spawn + seedless = SeedlessSeedSequence() + with pytest.raises(ValueError, match="n_children must be non-negative"): + seedless.spawn(-1) + + # Test BitGenerator.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + rng.bit_generator.spawn(-1) + + # Test Generator.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + rng.spawn(-1) + + def test_non_spawnable(): from numpy.random.bit_generator import ISeedSequence @@ -433,7 +458,7 @@ def test_advance_symmetry(self): assert val_neg == val_pos assert val_big == val_pos - def test_advange_large(self): + def test_advance_large(self): rs = Generator(self.bit_generator(38219308213743)) pcg = rs.bit_generator state = pcg.state["state"] @@ -472,7 +497,7 @@ def test_advance_symmetry(self): assert val_neg == val_pos assert val_big == val_pos - def test_advange_large(self): + def 
test_advance_large(self): rs = Generator(self.bit_generator(38219308213743)) pcg = rs.bit_generator state = pcg.state diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 7d13c49149b3..13fa26d443b9 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -462,7 +462,7 @@ def test_full_range(self, endpoint): except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " - "message:\n\n%s" % str(e)) + f"message:\n\n{e}") def test_full_range_array(self, endpoint): # Test for ticket #1690 @@ -477,7 +477,7 @@ def test_full_range_array(self, endpoint): except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " - "message:\n\n%s" % str(e)) + f"message:\n\n{e}") def test_in_bounds_fuzz(self, endpoint): # Don't use fixed seed @@ -1685,7 +1685,7 @@ def test_standard_exponential(self): [1.093830802293668, 1.256977002164613]]) assert_array_almost_equal(actual, desired, decimal=15) - def test_standard_expoential_type_error(self): + def test_standard_exponential_type_error(self): assert_raises(TypeError, random.standard_exponential, dtype=np.int32) def test_standard_gamma(self): diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 21093ef73eb6..1c345ac49a8a 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -1,7 +1,7 @@ import pytest import numpy as np -from numpy.random import MT19937, Generator +from numpy.random import MT19937, PCG64, Generator from numpy.testing import assert_, assert_array_equal @@ -112,7 +112,7 @@ def test_beta_expected_zero_frequency(self): # # CDF of the beta distribution at x: # p = mp.betainc(a, b, x1=0, x2=x, regularized=True) # n = 1000000 - # exprected_freq = 
float(n*p) + # expected_freq = float(n*p) # expected_freq = 77616.90831318991 assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq @@ -219,3 +219,22 @@ def test_zipf_a_near_1(self): # discrete distribution truncated to signed 64 bit integers, more # than half should be less than 2**62. assert np.count_nonzero(sample < 2**62) > n / 2 + + def test_binomial_btpe_sign_fix(self): + # Regression test guarding the BTPE sign correction fix in + # distributions.c. The PCG64 state below exercises the rejection + # window where the corrected error terms flip the accept/reject + # outcome. + state = { + 'bit_generator': 'PCG64', + 'state': { + 'state': 339225526786748945562563845880185242573, + 'inc': 114135179160287400024908587472913682319, + }, + 'has_uint32': 0, + 'uinteger': 0, + } + bg = PCG64() + bg.state = state + rng = Generator(bg) + assert rng.binomial(500, 0.5) == 238 diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index f110aa892b31..f9ea843e12b5 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -210,8 +210,7 @@ def test_full_range(self): rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) + f"but one was with the following message:\n\n{e}") def test_in_bounds_fuzz(self): # Don't use fixed seed diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 63ffb5a86389..1ff148b53a75 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -333,8 +333,7 @@ def test_full_range(self): rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) + f"but one was with the following message:\n\n{e}") def test_in_bounds_fuzz(self): # Don't use fixed seed diff --git 
a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 1c8882d1b672..d15c2ec258e8 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -161,7 +161,7 @@ def test_named_argument_initialization(self): rs2 = np.random.RandomState(seed=123456789) assert rs1.randint(0, 100) == rs2.randint(0, 100) - def test_choice_retun_dtype(self): + def test_choice_return_dtype(self): # GH 9867, now long since the NumPy default changed. c = np.random.choice(10, p=[.1] * 10, size=2) assert c.dtype == np.dtype(np.long) @@ -190,6 +190,40 @@ def test_p_zero_stream(self): assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), [0, 0, 0, 1, 1]) + def test_binomial_btpe_legacy_stream(self): + # Regression test for the BTPE sign correction fix: RandomState + # must preserve the pre-fix stream for compatibility. + state = { + 'bit_generator': 'PCG64', + 'state': { + 'state': 339225526786748945562563845880185242573, + 'inc': 114135179160287400024908587472913682319, + }, + 'has_uint32': 0, + 'uinteger': 0, + } + bg = random.PCG64() + bg.state = state + rs = random.RandomState(bg) + assert rs.binomial(500, 0.5) == 227 + + def test_multinomial_btpe_legacy_stream(self): + # See also test_binomial_btpe_legacy_stream. + # RandomState.multinomial relies on binomial internally. + state = { + 'bit_generator': 'PCG64', + 'state': { + 'state': 339225526786748945562563845880185242573, + 'inc': 114135179160287400024908587472913682319, + }, + 'has_uint32': 0, + 'uinteger': 0, + } + bg = random.PCG64() + bg.state = state + rs = random.RandomState(bg) + assert_array_equal(rs.multinomial(500, [0.5, 0.5]), [227, 273]) + def test_n_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. 
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index eeaf6d2b4bd3..f63c16650df8 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -5,7 +5,7 @@ import numpy as np from numpy import random -from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: @@ -151,7 +151,6 @@ def __array__(self, dtype=None, copy=None): assert_array_equal(m.__array__(), np.arange(5)) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "cls", [ diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 5353a72a1174..b30fddabd43e 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -63,8 +63,8 @@ def comp_state(state1, state2): if isinstance(state1, dict): for key in state1: identical &= comp_state(state1[key], state2[key]) - elif type(state1) != type(state2): - identical &= type(state1) == type(state2) + elif type(state1) is not type(state2): + identical &= type(state1) is type(state2) elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( state2, (list, tuple, np.ndarray))): for s1, s2 in zip(state1, state2): @@ -486,13 +486,13 @@ def test_pickle(self): rg = self._create_rng().rg pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(rg) == type(unpick)) + assert_(type(rg) is type(unpick)) assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(rg) == type(unpick)) + assert_(type(rg) is type(unpick)) assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 684a7c36adec..3d6c53b1e638 100644 --- a/numpy/testing/__init__.pyi 
+++ b/numpy/testing/__init__.pyi @@ -2,7 +2,7 @@ from unittest import TestCase from . import _private as _private, overrides from ._private import extbuild as extbuild -from ._private.utils import ( +from ._private.utils import ( # type: ignore[deprecated] BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, @@ -47,7 +47,7 @@ from ._private.utils import ( run_threaded, rundocs, runstring, - suppress_warnings, + suppress_warnings, # pyrefly: ignore[deprecated] tempdir, temppath, verbose, diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 2a724b73cfc3..25b691cf07cb 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -139,8 +139,7 @@ def _make_methods(functions, modname): signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' else: signature = '(PyObject *self, PyObject *args)' - methods_table.append( - "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags)) + methods_table.append(f'{{"{funcname}", (PyCFunction){cfuncname}, {flags}}},') func_code = f""" static PyObject* {cfuncname}{signature} {{ @@ -149,37 +148,36 @@ def _make_methods(functions, modname): """ codes.append(func_code) - body = "\n".join(codes) + """ - static PyMethodDef methods[] = { - %(methods)s - { NULL } - }; - static struct PyModuleDef moduledef = { + methods_str = '\n'.join(methods_table) + body = "\n".join(codes) + f""" + static PyMethodDef methods[] = {{ + {methods_str} + {{ NULL }} + }}; + static struct PyModuleDef moduledef = {{ PyModuleDef_HEAD_INIT, - "%(modname)s", /* m_name */ + "{modname}", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ methods, /* m_methods */ - }; - """ % {'methods': '\n'.join(methods_table), 'modname': modname} + }}; + """ return body def _make_source(name, init, body): """ Combines the code fragments into source code ready to be compiled """ - code = """ + code = f""" #include - %(body)s + {body} PyMODINIT_FUNC - PyInit_%(name)s(void) { - %(init)s - } - """ % { - 'name': 
name, 'init': init, 'body': body, - } + PyInit_{name}(void) {{ + {init} + }} + """ return code diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index ed928a5ec7b4..4cd56cfc4fc4 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -6,6 +6,7 @@ import contextlib import gc import importlib.metadata +import importlib.util import operator import os import pathlib @@ -63,17 +64,17 @@ class KnownFailureException(Exception): else: IS_INSTALLED = True try: - if sys.version_info >= (3, 13): - IS_EDITABLE = np_dist.origin.dir_info.editable - else: + if sys.version_info < (3, 13): # Backport importlib.metadata.Distribution.origin - import json # noqa: E401 + import json import types origin = json.loads( np_dist.read_text('direct_url.json') or '{}', object_hook=lambda data: types.SimpleNamespace(**data), ) IS_EDITABLE = origin.dir_info.editable + else: + IS_EDITABLE = np_dist.origin.dir_info.editable except AttributeError: IS_EDITABLE = False @@ -588,7 +589,7 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): usecomplex = False def _build_err_msg(): - header = ('Arrays are not almost equal to %d decimals' % decimal) + header = (f'Arrays are not almost equal to {decimal} decimals') return build_err_msg([actual, desired], err_msg, verbose=verbose, header=header) @@ -711,7 +712,7 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', sc_actual = 0.0 msg = build_err_msg( [actual, desired], err_msg, - header='Items are not equal to %d significant digits:' % significant, + header=f'Items are not equal to {significant} significant digits:', verbose=verbose) try: # If one of desired/actual is not finite, handle it specially here: @@ -743,7 +744,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', ox, oy = x, y def isnumber(x): - return x.dtype.char in '?bhilqpBHILQPefdgFDG' + return type(x.dtype)._is_numeric def istime(x): return x.dtype.char in 
"Mm" @@ -783,8 +784,8 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], - err_msg + '\n%s location mismatch:' - % (hasval), verbose=verbose, header=header, + err_msg + f'\n{hasval} location mismatch:', + verbose=verbose, header=header, names=names, precision=precision) raise AssertionError(msg) @@ -904,8 +905,8 @@ def assert_same_inf_values(x, y, infs_mask): n_mismatch = reduced.size - reduced.sum(dtype=intp) n_elements = flagged.size if flagged.ndim != 0 else reduced.size percent_mismatch = 100 * n_mismatch / n_elements - remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' - f'({percent_mismatch:.3g}%)'] + remarks = [(f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)')] if invalids.ndim != 0: if flagged.ndim > 0: positions = np.argwhere(np.asarray(~flagged))[invalids] @@ -954,7 +955,7 @@ def assert_same_inf_values(x, y, infs_mask): # note: this definition of relative error matches that one # used by assert_allclose (found in np.isclose) # Filter values where the divisor would be zero - nonzero = np.bool(y != 0) + nonzero = np.bool(y != np.zeros_like(y)) nonzero_and_invalid = np.logical_and(invalids, nonzero) if all(~nonzero_and_invalid): @@ -1054,6 +1055,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Examples -------- + >>> import numpy as np + The first assert does not raise an exception: >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], @@ -1219,9 +1222,8 @@ def compare(x, y): return z < 1.5 * 10.0**(-decimal) - assert_array_compare(compare, actual, desired, err_msg=err_msg, - verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal), + assert_array_compare(compare, actual, desired, err_msg=err_msg, verbose=verbose, + header=f'Arrays are not almost equal to {decimal} decimals', precision=decimal) @@ -1441,12 +1443,13 @@ def rundocs(filename=None, raise_on_error=True): """ import 
doctest - from numpy.distutils.misc_util import exec_mod_from_location if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] - m = exec_mod_from_location(name, filename) + spec = importlib.util.spec_from_file_location(name, filename) + m = importlib.util.module_from_spec(spec) + spec.loader.exec_module(m) tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) @@ -1461,7 +1464,8 @@ def rundocs(filename=None, raise_on_error=True): runner.run(test, out=out) if runner.failures > 0 and raise_on_error: - raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + err_msg = '\n'.join(msg) + raise AssertionError(f"Some doctests failed:\n{err_msg}") def check_support_sve(__cache=[]): @@ -1566,7 +1570,7 @@ def decorate_methods(cls, decorator, testmatch=None): """ if testmatch is None: - testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + testmatch = re.compile(rf'(?:^|[\\b_\\.{os.sep}-])[Tt]est') else: testmatch = re.compile(testmatch) cls_attr = cls.__dict__ @@ -1685,7 +1689,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Array desired. rtol : float, optional Relative tolerance. - atol : float, optional + atol : float | np.timedelta64, optional Absolute tolerance. equal_nan : bool, optional. If True, NaNs will compare equal. 
@@ -1764,7 +1768,11 @@ def compare(x, y): equal_nan=equal_nan) actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + if isinstance(atol, np.timedelta64): + atol_str = str(atol) + else: + atol_str = f"{atol:g}" + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol_str}' assert_array_compare(compare, actual, desired, err_msg=str(err_msg), verbose=verbose, header=header, equal_nan=equal_nan, strict=strict) @@ -1879,9 +1887,8 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): - raise AssertionError("Arrays are not almost equal up to %g " - "ULP (max difference is %g ULP)" % - (maxulp, np.max(ret))) + raise AssertionError(f"Arrays are not almost equal up to {maxulp:g} " + f"ULP (max difference is {np.max(ret):g} ULP)") return ret @@ -2828,3 +2835,32 @@ def run_threaded(func, max_workers=8, pass_count=False, barrier.abort() for f in futures: f.result() + + +def requires_deep_recursion(func): + """Decorator to skip test if deep recursion is not supported.""" + import pytest + + @wraps(func) + def wrapper(*args, **kwargs): + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("WASM has limited stack size") + if not IS_64BIT: + pytest.skip("32 bit Python has limited stack size") + cflags = sysconfig.get_config_var('CFLAGS') or '' + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + address_sanitizer = ( + '-fsanitize=address' in cflags or + '--with-address-sanitizer' in config_args + ) + thread_sanitizer = ( + '-fsanitize=thread' in cflags or + '--with-thread-sanitizer' in config_args + ) + if address_sanitizer or thread_sanitizer: + pytest.skip("AddressSanitizer and ThreadSanitizer do not support " + "deep recursion") + return func(*args, **kwargs) + return wrapper diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 
016bbecf4604..f2953298f02f 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -10,17 +10,13 @@ from pathlib import Path from re import Pattern from typing import ( Any, - AnyStr, ClassVar, Final, Generic, Literal as L, NoReturn, - ParamSpec, Self, SupportsIndex, - TypeAlias, - TypeVarTuple, overload, type_check_only, ) @@ -38,7 +34,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, ) -__all__ = [ # noqa: RUF022 +__all__ = [ "IS_EDITABLE", "IS_MUSL", "IS_PYPY", @@ -91,25 +87,20 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_Ts = TypeVarTuple("_Ts") -_Tss = ParamSpec("_Tss") -_ET = TypeVar("_ET", bound=BaseException, default=BaseException) -_FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_StrLike: TypeAlias = str | bytes -_RegexLike: TypeAlias = _StrLike | Pattern[Any] -_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co +type _StrLike = str | bytes +type _RegexLike = _StrLike | Pattern[Any] +type _NumericArrayLike = _ArrayLikeNumber_co | _ArrayLikeObject_co -_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] -_WarningSpec: TypeAlias = type[Warning] -_WarnLog: TypeAlias = list[warnings.WarningMessage] -_ToModules: TypeAlias = Iterable[types.ModuleType] +type _ExceptionSpec[ExceptionT: BaseException] = type[ExceptionT] | tuple[type[ExceptionT], ...] 
+type _WarningSpec = type[Warning] +type _WarnLog = list[warnings.WarningMessage] +type _ToModules = Iterable[types.ModuleType] # Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` -_ComparisonFunc: TypeAlias = Callable[ - [NDArray[Any], NDArray[Any]], +type _ComparisonFunc = Callable[ + [np.ndarray, np.ndarray], bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], ] @@ -158,7 +149,7 @@ class suppress_warnings: def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... def __enter__(self) -> Self: ... def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... - def __call__(self, /, func: _FT) -> _FT: ... + def __call__[FuncT: Callable[..., Any]](self, /, func: FuncT) -> FuncT: ... # def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... @@ -289,36 +280,36 @@ def assert_string_equal(actual: str, desired: str) -> None: ... # @overload -def assert_raises( - exception_class: _ExceptionSpec[_ET], +def assert_raises[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], /, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises( - exception_class: _ExceptionSpec, - callable: Callable[_Tss, Any], +def assert_raises[**Tss]( + exception_class: _ExceptionSpec[BaseException], + callable: Callable[Tss, Any], /, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @overload -def assert_raises_regex( - exception_class: _ExceptionSpec[_ET], +def assert_raises_regex[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], expected_regexp: _RegexLike, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... 
+) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises_regex( - exception_class: _ExceptionSpec, +def assert_raises_regex[**Tss]( + exception_class: _ExceptionSpec[BaseException], expected_regexp: _RegexLike, - callable: Callable[_Tss, Any], - *args: _Tss.args, - **kwargs: _Tss.kwargs, + callable: Callable[Tss, Any], + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @@ -327,7 +318,7 @@ def assert_allclose( actual: _ArrayLikeTD64_co, desired: _ArrayLikeTD64_co, rtol: float = 1e-7, - atol: float = 0, + atol: float | np.timedelta64 = 0, equal_nan: bool = True, err_msg: object = "", verbose: bool = True, @@ -368,19 +359,24 @@ def assert_array_max_ulp( def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload @deprecated("Please use warnings.catch_warnings or pytest.warns instead") -def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_warns[**Tss, ReturnT]( + warning_class: _WarningSpec, + func: Callable[Tss, ReturnT], + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> ReturnT: ... # @overload def assert_no_warnings() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_no_warnings[**Tss, ReturnT](func: Callable[Tss, ReturnT], /, *args: Tss.args, **kwargs: Tss.kwargs) -> ReturnT: ... # @overload def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... @overload -def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... +def assert_no_gc_cycles[**Tss](func: Callable[Tss, Any], /, *args: Tss.args, **kwargs: Tss.kwargs) -> None: ... ### @@ -392,21 +388,21 @@ def tempdir( dir: None = None, ) -> _GeneratorContextManager[str]: ... 
@overload -def tempdir( +def tempdir[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, prefix: AnyStr | None = None, *, dir: GenericPath[AnyStr], ) -> _GeneratorContextManager[AnyStr]: ... @overload -def tempdir( +def tempdir[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, *, prefix: AnyStr, dir: GenericPath[AnyStr] | None = None, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def tempdir( +def tempdir[AnyStr: (bytes, str)]( suffix: AnyStr, prefix: AnyStr | None = None, dir: GenericPath[AnyStr] | None = None, @@ -421,14 +417,14 @@ def temppath( text: bool = False, ) -> _GeneratorContextManager[str]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None, prefix: AnyStr | None, dir: GenericPath[AnyStr], text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, prefix: AnyStr | None = None, *, @@ -436,14 +432,14 @@ def temppath( text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None, prefix: AnyStr, dir: GenericPath[AnyStr] | None = None, text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, *, prefix: AnyStr, @@ -451,7 +447,7 @@ def temppath( text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr, prefix: AnyStr | None = None, dir: GenericPath[AnyStr] | None = None, @@ -479,27 +475,27 @@ def run_threaded( prepare_args: None = None, ) -> None: ... @overload -def run_threaded( - func: Callable[[*_Ts], None], +def run_threaded[*Ts]( + func: Callable[[*Ts], None], max_workers: int, pass_count: bool, pass_barrier: bool, outer_iterations: int, - prepare_args: tuple[*_Ts], + prepare_args: tuple[*Ts], ) -> None: ... 
@overload -def run_threaded( - func: Callable[[*_Ts], None], +def run_threaded[*Ts]( + func: Callable[[*Ts], None], max_workers: int = 8, pass_count: bool = False, pass_barrier: bool = False, outer_iterations: int = 1, *, - prepare_args: tuple[*_Ts], + prepare_args: tuple[*Ts], ) -> None: ... # -def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... # noqa: ANN401 +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... def break_cycles() -> None: ... diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 6d43343ef98a..3dd0ca6857e5 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -428,11 +428,16 @@ def test_datetime(self): def test_nat_items(self): # not a datetime - nadt_no_unit = np.datetime64("NaT") nadt_s = np.datetime64("NaT", "s") nadt_d = np.datetime64("NaT", "ns") + # not a timedelta - natd_no_unit = np.timedelta64("NaT") + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + nadt_no_unit = np.datetime64("NaT") + natd_no_unit = np.timedelta64("NaT") natd_s = np.timedelta64("NaT", "s") natd_d = np.timedelta64("NaT", "ns") @@ -1337,8 +1342,12 @@ def test_report_max_relative_error(self): def test_timedelta(self): # see gh-18286 - a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") - assert_allclose(a, a) + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) def test_error_message_unsigned(self): """Check the message is formatted correctly when overflow can occur diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 
b88910ce457e..2458c393db04 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -65,7 +65,7 @@ def test_basic2(self): np._core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes is not available on this python: skipping the test" - " (import error was: %s)" % str(e)) + f" (import error was: {e})") print(msg) @@ -381,3 +381,25 @@ def test_overlapping(self): 'formats': [np.uint32, np.uint32] }) assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt) + + def test_cannot_convert_to_ctypes(self): + + _type_to_value = { + np.str_: ("aa",), + np.bool: (True,), + np.datetime64: ("2026-01-01",), + np.timedelta64: (1, "s") + } + for _scalar_type in np.sctypeDict.values(): + if _scalar_type == np.object_: + continue + + if _scalar_type in _type_to_value: + numpy_scalar = _scalar_type(*_type_to_value[_scalar_type]) + else: + numpy_scalar = _scalar_type(1) + + with pytest.raises( + TypeError, match="readonly arrays unsupported" + ): + np.ctypeslib.as_ctypes(numpy_scalar) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index f6fa8611e181..a5cc63fe3a19 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -141,17 +141,6 @@ def test_NPY_NO_EXPORT(): "typing.mypy_plugin", "version", ]] -if sys.version_info < (3, 12): - PUBLIC_MODULES += [ - 'numpy.' + s for s in [ - "distutils", - "distutils.cpuinfo", - "distutils.exec_command", - "distutils.misc_util", - "distutils.log", - "distutils.system_info", - ] - ] PUBLIC_ALIASED_MODULES = [ @@ -202,66 +191,6 @@ def test_NPY_NO_EXPORT(): "random.bit_generator", "testing.print_coercion_tables", ]] -if sys.version_info < (3, 12): - PRIVATE_BUT_PRESENT_MODULES += [ - 'numpy.' 
+ s for s in [ - "distutils.armccompiler", - "distutils.fujitsuccompiler", - "distutils.ccompiler", - 'distutils.ccompiler_opt', - "distutils.command", - "distutils.command.autodist", - "distutils.command.bdist_rpm", - "distutils.command.build", - "distutils.command.build_clib", - "distutils.command.build_ext", - "distutils.command.build_py", - "distutils.command.build_scripts", - "distutils.command.build_src", - "distutils.command.config", - "distutils.command.config_compiler", - "distutils.command.develop", - "distutils.command.egg_info", - "distutils.command.install", - "distutils.command.install_clib", - "distutils.command.install_data", - "distutils.command.install_headers", - "distutils.command.sdist", - "distutils.conv_template", - "distutils.core", - "distutils.extension", - "distutils.fcompiler", - "distutils.fcompiler.absoft", - "distutils.fcompiler.arm", - "distutils.fcompiler.compaq", - "distutils.fcompiler.environment", - "distutils.fcompiler.g95", - "distutils.fcompiler.gnu", - "distutils.fcompiler.hpux", - "distutils.fcompiler.ibm", - "distutils.fcompiler.intel", - "distutils.fcompiler.lahey", - "distutils.fcompiler.mips", - "distutils.fcompiler.nag", - "distutils.fcompiler.none", - "distutils.fcompiler.pathf95", - "distutils.fcompiler.pg", - "distutils.fcompiler.nv", - "distutils.fcompiler.sun", - "distutils.fcompiler.vast", - "distutils.fcompiler.fujitsu", - "distutils.from_template", - "distutils.intelccompiler", - "distutils.lib2def", - "distutils.line_endings", - "distutils.mingw32ccompiler", - "distutils.msvccompiler", - "distutils.npy_pkg_config", - "distutils.numpy_distribution", - "distutils.pathccompiler", - "distutils.unixccompiler", - ] - ] def is_unexpected(name): @@ -274,12 +203,6 @@ def is_unexpected(name): ) -if sys.version_info >= (3, 12): - SKIP_LIST = [] -else: - SKIP_LIST = ["numpy.distutils.msvc9compiler"] - - def test_all_modules_are_expected(): """ Test that we don't add anything that looks like a new public module by @@ -290,7 
+213,7 @@ def test_all_modules_are_expected(): for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, prefix=np.__name__ + '.', onerror=None): - if is_unexpected(modname) and modname not in SKIP_LIST: + if is_unexpected(modname): # We have a name that is new. If that's on purpose, add it to # PUBLIC_MODULES. We don't expect to have to add anything to # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! @@ -303,7 +226,6 @@ def test_all_modules_are_expected(): # Stuff that clearly shouldn't be in the API and is detected by the next test # below SKIP_LIST_2 = [ - 'numpy.lib.math', 'numpy.matlib.char', 'numpy.matlib.rec', 'numpy.matlib.emath', @@ -315,12 +237,6 @@ def test_all_modules_are_expected(): 'numpy.matlib.ctypeslib', 'numpy.matlib.ma', ] -if sys.version_info < (3, 12): - SKIP_LIST_2 += [ - 'numpy.distutils.log.sys', - 'numpy.distutils.log.logging', - 'numpy.distutils.log.warnings', - ] def test_all_modules_are_expected_2(): @@ -550,6 +466,7 @@ def test_core_shims_coherence(): assert member is getattr(core, member_name) +@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test_functions_single_location(): """ Check that each public function is available from one location only. @@ -760,6 +677,7 @@ def _check_correct_qualname_and_module(obj) -> bool: ) +@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test___qualname___and___module___attribute(): # NumPy messes with module and name/qualname attributes, but any object # should be discoverable based on its module and qualname, so test that. 
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 01b743941cf2..e5f0a07436c8 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -5,15 +5,13 @@ import os import subprocess import sys -from os.path import dirname, isfile, join as pathjoin +from os.path import dirname import pytest import numpy as np from numpy.testing import IS_WASM, assert_equal -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) - def find_f2py_commands(): if sys.platform == 'win32': @@ -33,7 +31,6 @@ def find_f2py_commands(): return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] -@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") @pytest.mark.xfail(reason="Test is unreliable") @pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) def test_f2py(f2py_cmd): diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 7efa2a1d1896..84f2f79d1f86 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -34,7 +34,7 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if getattr(node.args[0], "value", None) == "ignore": + if node.args and getattr(node.args[0], "value", None) == "ignore": if not self.__filename.name.startswith("test_"): raise AssertionError( "ignore filters should only be used in tests; " @@ -47,14 +47,16 @@ def visit_Call(self, node): # This file return - # See if stacklevel exists: + # See if stacklevel or skip_file_prefixes exists: if len(node.args) == 3: return args = {kw.arg for kw in node.keywords} if "stacklevel" in args: return + if "skip_file_prefixes" in args: + return raise AssertionError( - "warnings should have an appropriate stacklevel; " + "warnings should have an appropriate stacklevel or skip_file_prefixes; " f"found in {self.__filename} on line {node.lineno}") diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 
ef4c0885257b..04b26d0faf64 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -64,17 +64,24 @@ ndarray ~~~~~~~ -It's possible to mutate the dtype of an array at runtime. For example, -the following code is valid: +The `numpy.ndarray` class is a `generic type`_ that accepts two type arguments: + +1. The type of `numpy.ndarray.shape`, which must be a `tuple` of `int`, e.g. + ``tuple[int, int]`` (2-D shape) or ``tuple[()]`` (0-D shape). + The default shape is ``tuple[Any, ...]``, which represents an unknown shape with + *any* number of dimensions. + Currently, ``Literal`` ints or other more specific types are not supported. +2. The type of `numpy.ndarray.dtype`, which must be a subtype of `numpy.dtype` such as + ``numpy.dtype[numpy.float64]``. If omitted, it will default to ``numpy.dtype[Any]``. .. code-block:: python - >>> x = np.array([1, 2]) - >>> x.dtype = np.bool + >>> import numpy as np + + >>> type ImageRGB = np.ndarray[tuple[int, int, int], np.dtype[np.uint8]] + >>> type Vector[S: np.generic] = np.ndarray[tuple[int], np.dtype[S]] -This sort of mutation is not allowed by the types. Users who want to -write statically typed code should instead use the `numpy.ndarray.view` -method to create a view of the array with a different dtype. +.. _generic type: https://typing.python.org/en/latest/spec/generics.html DTypeLike ~~~~~~~~~ @@ -93,7 +100,7 @@ Number precision ~~~~~~~~~~~~~~~~ -The precision of `numpy.number` subclasses is treated as a invariant generic +The precision of `numpy.number` subclasses is treated as an invariant generic parameter (see :class:`~NBitBase`), simplifying the annotating of processes involving precision-based casting. 
diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi index 7a4c7b41079c..5af10da218d9 100644 --- a/numpy/typing/__init__.pyi +++ b/numpy/typing/__init__.pyi @@ -1,3 +1,8 @@ -from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray +from numpy._typing import ( # type: ignore[deprecated] + ArrayLike, + DTypeLike, + NBitBase, + NDArray, +) __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index fb78eb077c44..04014a9e867b 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -39,7 +39,7 @@ """ from collections.abc import Callable, Iterable -from typing import TYPE_CHECKING, Final, TypeAlias, cast +from typing import TYPE_CHECKING, Final, cast import numpy as np @@ -108,14 +108,14 @@ def _get_c_intp_name() -> str: from mypy.nodes import ImportFrom, MypyFile, Statement from mypy.plugin import AnalyzeTypeContext, Plugin -except ModuleNotFoundError as e: +except ModuleNotFoundError as _exc: def plugin(version: str) -> type: - raise e + raise _exc else: - _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + type _HookFunc = Callable[[AnalyzeTypeContext], mypy.types.Type] def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: """Replace a type-alias with a concrete ``NBitBase`` subclass.""" diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi index 3dbe5eda296e..91909d118884 100644 --- a/numpy/typing/tests/data/fail/char.pyi +++ b/numpy/typing/tests/data/fail/char.pyi @@ -58,6 +58,3 @@ np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] np.char.index(AR_S, "a", end=9) # type: ignore[arg-type] np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] - -np.char.isdecimal(AR_S) # type: ignore[arg-type] -np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/chararray.pyi 
b/numpy/typing/tests/data/fail/chararray.pyi index 589895510227..806ec5a0d303 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -2,8 +2,8 @@ from typing import Any import numpy as np -AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] -AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] # type: ignore[deprecated] AR_S.encode() # type: ignore[misc] AR_U.decode() # type: ignore[misc] diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 92b0cb366207..f38bc2eeabbd 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -36,17 +36,18 @@ np.partition(a, 0, axis="bob") # type: ignore[call-overload] np.partition(A, 0, kind="bob") # type: ignore[call-overload] np.partition(A, 0, order=range(5)) # type: ignore[arg-type] -np.argpartition(a, None) # type: ignore[arg-type] -np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] -np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(a, None) # type: ignore[call-overload] +np.argpartition(a, 0, axis="bob") # type: ignore[call-overload] +np.argpartition(A, 0, kind="bob") # type: ignore[call-overload] np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] +np.argpartition(AR_f4, 0, order="a") # type: ignore[arg-type] np.sort(A, axis="bob") # type: ignore[call-overload] np.sort(A, kind="bob") # type: ignore[call-overload] np.sort(A, order=range(5)) # type: ignore[arg-type] -np.argsort(A, axis="bob") # type: ignore[arg-type] -np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, axis="bob") # type: ignore[call-overload] +np.argsort(A, kind="bob") # type: ignore[call-overload] np.argsort(A, order=range(5)) # type: ignore[arg-type] np.argmax(A, 
axis="bob") # type: ignore[call-overload] @@ -74,7 +75,7 @@ np.trace(A, axis2=[]) # type: ignore[call-overload] np.ravel(a, order="bob") # type: ignore[call-overload] -np.nonzero(0) # type: ignore[arg-type] +np.nonzero(0) # type: ignore[call-overload] np.compress([True], A, axis=1.0) # type: ignore[call-overload] @@ -117,17 +118,17 @@ np.prod(a, out=False) # type: ignore[call-overload] np.prod(a, keepdims=1.0) # type: ignore[call-overload] np.prod(a, initial=int) # type: ignore[call-overload] np.prod(a, where=1.0) # type: ignore[call-overload] -np.prod(AR_U) # type: ignore[arg-type] +np.prod(AR_U) # type: ignore[type-var] np.cumprod(a, axis=1.0) # type: ignore[call-overload] np.cumprod(a, out=False) # type: ignore[call-overload] -np.cumprod(AR_U) # type: ignore[arg-type] +np.cumprod(AR_U) # type: ignore[type-var] np.size(a, axis=1.0) # type: ignore[arg-type] np.around(a, decimals=1.0) # type: ignore[call-overload] np.around(a, out=type) # type: ignore[call-overload] -np.around(AR_U) # type: ignore[arg-type] +np.around(AR_U) # type: ignore[type-var] np.mean(a, axis=1.0) # type: ignore[call-overload] np.mean(a, out=False) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/index_tricks.pyi b/numpy/typing/tests/data/fail/index_tricks.pyi index 8b7b1ae2b5bf..f1f78bd5d805 100644 --- a/numpy/typing/tests/data/fail/index_tricks.pyi +++ b/numpy/typing/tests/data/fail/index_tricks.pyi @@ -11,4 +11,4 @@ np.mgrid[...] # type: ignore[index] np.ogrid[1] # type: ignore[index] np.ogrid[...] 
# type: ignore[index] np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] -np.diag_indices(1.0) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index 78aceb235f8d..c59238eab878 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -9,44 +9,40 @@ np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] -np.linalg.tensorinv(AR_O) # type: ignore[arg-type] - -np.linalg.inv(AR_O) # type: ignore[arg-type] +np.linalg.tensorinv(AR_O) # type: ignore[type-var] +np.linalg.inv(AR_O) # type: ignore[type-var] +np.linalg.pinv(AR_O) # type: ignore[type-var] +np.linalg.cholesky(AR_O) # type: ignore[type-var] np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.cholesky(AR_O) # type: ignore[arg-type] +np.linalg.eig(AR_O) # type: ignore[arg-type] + +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] -np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.qr(AR_O) # type: ignore[type-var] np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] +np.linalg.svd(AR_O) # type: ignore[arg-type] + np.linalg.eigvals(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] -np.linalg.eig(AR_O) # type: ignore[arg-type] - -np.linalg.eigh(AR_O) # type: ignore[arg-type] -np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] - -np.linalg.svd(AR_O) # type: ignore[arg-type] - np.linalg.svdvals(AR_O) # type: ignore[arg-type] np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] -np.linalg.cond(AR_O) # type: ignore[arg-type] -np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] - np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] 
-np.linalg.pinv(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[call-overload] np.linalg.slogdet(AR_O) # type: ignore[arg-type] - np.linalg.det(AR_O) # type: ignore[arg-type] np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] -np.linalg.multi_dot([AR_M]) # type: ignore[list-item] +np.linalg.multi_dot([AR_M]) # type: ignore[type-var] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 084ae971bdd0..59698264e7fd 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -1,16 +1,13 @@ -from typing import TypeAlias, TypeVar - import numpy as np import numpy.typing as npt from numpy._typing import _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] +type _MArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] -MAR_b: MaskedArray[np.bool] -MAR_c: MaskedArray[np.complex128] -MAR_td64: MaskedArray[np.timedelta64] +MAR_b: _MArray[np.bool] +MAR_c: _MArray[np.complex128] +MAR_td64: _MArray[np.timedelta64] AR_b: npt.NDArray[np.bool] @@ -107,9 +104,9 @@ MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] -MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[call-overload] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[call-overload] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[call-overload] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] 
np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 29418930061c..47d797291cfd 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -21,7 +21,7 @@ f8.argpartition(0) # type: ignore[attr-defined] f8.partition(0) # type: ignore[attr-defined] f8.dot(1) # type: ignore[attr-defined] -# NOTE: The following functions retur `Never`, causing mypy to stop analysis at that +# NOTE: The following functions return `Never`, causing mypy to stop analysis at that # point, which we circumvent by wrapping them in a function. def f8_diagonal(x: np.float64) -> Never: diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index 328a521ae679..11c525a332a0 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -4,14 +4,6 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] -np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] - -np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] -np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] -np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] -np.strings.less(AR_U, AR_S) # type: ignore[arg-type] - np.strings.encode(AR_S) # type: ignore[arg-type] np.strings.decode(AR_U) # type: ignore[arg-type] @@ -45,8 +37,5 @@ np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] -np.strings.isdecimal(AR_S) # type: ignore[arg-type] -np.strings.isnumeric(AR_S) # type: ignore[arg-type] - np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git 
a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 8b68e996304c..d3cb8d2c0655 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -9,4 +9,5 @@ np.isrealobj(DTYPE_i8) # type: ignore[arg-type] np.typename(DTYPE_i8) # type: ignore[call-overload] np.typename("invalid") # type: ignore[call-overload] -np.common_type(np.timedelta64()) # type: ignore[arg-type] +_td64: np.timedelta64 +np.common_type(_td64) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index e347ec096e21..4d5a7ab57933 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -392,13 +392,13 @@ def __rpow__(self, value: Any) -> Object: dt - i8 td + td -td + i -td + i4 -td + i8 +td + i # type: ignore[deprecated] +td + i4 # type: ignore[deprecated] +td + i8 # type: ignore[deprecated] td - td -td - i -td - i4 -td - i8 +td - i # type: ignore[deprecated] +td - i4 # type: ignore[deprecated] +td - i8 # type: ignore[deprecated] td / f td / f4 td / f8 diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index d91d257cb17c..27cbffa06a5c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,7 +1,6 @@ from typing import Any import numpy as np -import numpy.typing as npt class Index: @@ -9,7 +8,7 @@ def __index__(self) -> int: return 0 -class SubClass(npt.NDArray[np.float64]): +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): pass diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index f922beae34ce..f1e09b03a4ec 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,11 +1,5 @@ -from __future__ import annotations - -from typing import 
TYPE_CHECKING - import numpy as np - -if TYPE_CHECKING: - from numpy._typing import ArrayLike, NDArray, _SupportsArray +from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index b2e52762c7a8..cfb15cddc36e 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -20,7 +20,7 @@ b_ = np.bool() b = False -c = complex() +c = 0j f = 0.0 i = 0 @@ -34,8 +34,8 @@ AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") -AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) -AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1", "s")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1", "s")]) AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) # Arrays @@ -99,11 +99,11 @@ dt > dt td > td -td > i -td > i4 -td > i8 +td > i # type: ignore[deprecated] +td > i4 # type: ignore[deprecated] +td > i8 # type: ignore[deprecated] td > AR_i -td > SEQ +td > SEQ # type: ignore[deprecated] # boolean diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index f1e0cb2a69d3..c6d86baf7397 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -25,11 +25,11 @@ (KACF, AR.flatten), (KACF, AR.ravel), (KACF, partial(np.array, 1)), - # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + # NOTE: __call__ is needed due to python/mypy#17620 (KACF, partial(np.ndarray.__call__, 1)), - (CF, partial(np.zeros.__call__, 1)), - (CF, partial(np.ones.__call__, 1)), - (CF, 
partial(np.empty.__call__, 1)), + (CF, partial(np.zeros, 1)), + (CF, partial(np.ones, 1)), + (CF, partial(np.empty, 1)), (CF, partial(np.full, 1, 1)), (KACF, partial(np.zeros_like, AR)), (KACF, partial(np.ones_like, AR)), diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 3ccea66861eb..72cbc5d9b98e 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,12 +1,11 @@ import datetime as dt -from typing import Any, TypeAlias, TypeVar, cast +from typing import Any, cast import numpy as np import numpy.typing as npt from numpy._typing import _Shape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[ScalarT]] # mypy: disable-error-code=no-untyped-call diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 40c84d8641bd..0a9302a2a116 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,14 +9,15 @@ from __future__ import annotations import operator +from collections.abc import Hashable from typing import Any, cast import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... -class IntSubClass(npt.NDArray[np.intp]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... +class IntSubClass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... i4 = np.int32(1) @@ -192,3 +193,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... 
A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] + +# regression test for https://github.com/numpy/numpy/issues/30445 +def f(x: np.generic) -> Hashable: + return x diff --git a/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py index 0ca3dff392e1..1458339bf6ae 100644 --- a/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py +++ b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -11,13 +11,13 @@ nd1.reshape(4, order="C") # resize -nd1.resize() -nd1.resize(4) -nd1.resize(2, 2) -nd1.resize((2, 2)) +nd1.resize() # type: ignore[deprecated] +nd1.resize(4) # type: ignore[deprecated] +nd1.resize(2, 2) # type: ignore[deprecated] +nd1.resize((2, 2)) # type: ignore[deprecated] -nd1.resize((2, 2), refcheck=True) -nd1.resize(4, refcheck=True) +nd1.resize((2, 2), refcheck=True) # type: ignore[deprecated] +nd1.resize(4, refcheck=True) # type: ignore[deprecated] nd2 = np.array([[1, 2], [3, 4]]) diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index eeb707b255e1..dff5bc005974 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -105,16 +105,16 @@ def __float__(self) -> float: np.datetime64(None) np.datetime64(None, "D") -np.timedelta64() -np.timedelta64(0) +np.timedelta64() # type: ignore[deprecated] +np.timedelta64(0) # type: ignore[deprecated] np.timedelta64(0, "D") np.timedelta64(0, ('ms', 3)) np.timedelta64(0, b"D") -np.timedelta64("3") -np.timedelta64(b"5") -np.timedelta64(np.timedelta64(2)) -np.timedelta64(dt.timedelta(2)) -np.timedelta64(None) +np.timedelta64("3") # type: ignore[deprecated] +np.timedelta64(b"5") # type: ignore[deprecated] +np.timedelta64(np.timedelta64(2)) # type: ignore[deprecated] +np.timedelta64(dt.timedelta(2)) # type: ignore[deprecated] +np.timedelta64(None) # type: ignore[deprecated] 
np.timedelta64(None, "D") np.void(1) @@ -132,7 +132,7 @@ def __float__(self) -> float: f8 = np.float64() c16 = np.complex128() b = np.bool() -td = np.timedelta64() +td = np.timedelta64(0, "ns") U = np.str_("1") S = np.bytes_("1") AR = np.array(1, dtype=np.float64) @@ -247,3 +247,16 @@ def __float__(self) -> float: c16.reshape(1) U.reshape(1) S.reshape(1) + +# Indexing scalars with any of {None, ..., tuple[()], tuple[None], tuple[...], +# tuple[None, ...]} should be valid +b[None] +i8[None] +u8[None] +f8[None] +c16[None] +c16[...] +c16[()] +c16[(None,)] +c16[(...,)] +c16[None, None] diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 491bce43fdae..8edf2d7fed9e 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -406,20 +406,24 @@ assert_type(M8 - M8, np.timedelta64) assert_type(M8 - i, np.datetime64) assert_type(M8 - i8, np.datetime64) -assert_type(M8_none + m8, np.datetime64[None]) assert_type(M8_none + i, np.datetime64[None]) -assert_type(M8_none + i8, np.datetime64[None]) -assert_type(M8_none - M8, np.timedelta64[None]) -assert_type(M8_none - m8, np.datetime64[None]) assert_type(M8_none - i, np.datetime64[None]) + +assert_type(M8_none + i8, np.datetime64[None]) assert_type(M8_none - i8, np.datetime64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(M8_none + m8, np.datetime64[None]) # type: ignore[assert-type] +assert_type(M8_none - M8, np.timedelta64[None]) # type: ignore[assert-type] +# NOTE: Mypy incorrectly infers `datetime64[Any]`, but pyright behaves correctly. 
+assert_type(M8_none - m8, np.datetime64[None]) # type: ignore[assert-type] + assert_type(m8 + m8, np.timedelta64) -assert_type(m8 + i, np.timedelta64) -assert_type(m8 + i8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) # type: ignore[deprecated] +assert_type(m8 + i8, np.timedelta64) # type: ignore[deprecated] assert_type(m8 - m8, np.timedelta64) -assert_type(m8 - i, np.timedelta64) -assert_type(m8 - i8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) # type: ignore[deprecated] +assert_type(m8 - i8, np.timedelta64) # type: ignore[deprecated] assert_type(m8 * f, np.timedelta64) assert_type(m8 * f4, np.timedelta64) assert_type(m8 * np.True_, np.timedelta64) @@ -428,20 +432,23 @@ assert_type(m8 / f4, np.timedelta64) assert_type(m8 / m8, np.float64) assert_type(m8 // m8, np.int64) assert_type(m8 % m8, np.timedelta64) -assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] assert_type(m8_none + m8, np.timedelta64[None]) -assert_type(m8_none + i, np.timedelta64[None]) -assert_type(m8_none + i8, np.timedelta64[None]) -assert_type(m8_none - i, np.timedelta64[None]) -assert_type(m8_none - i8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) # type: ignore[deprecated] +assert_type(m8_none + i8, np.timedelta64[None]) # type: ignore[deprecated] +assert_type(m8_none - i, np.timedelta64[None]) # type: ignore[deprecated] +assert_type(m8_none - i8, np.timedelta64[None]) # type: ignore[deprecated] -assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + i, np.timedelta64[int]) # type: ignore[deprecated] assert_type(m8_int + m8_delta, np.timedelta64[int]) -assert_type(m8_int + m8, np.timedelta64[int | None]) -assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int + m8, np.timedelta64) +assert_type(m8_int - i, np.timedelta64[int]) # type: 
ignore[deprecated] assert_type(m8_int - m8_delta, np.timedelta64[int]) -assert_type(m8_int - m8, np.timedelta64[int | None]) +assert_type(m8_int - m8_int, np.timedelta64[int]) +assert_type(m8_int - m8_none, np.timedelta64[None]) +assert_type(m8_int - m8, np.timedelta64) assert_type(m8_delta + date, dt.date) assert_type(m8_delta + time, dt.datetime) @@ -503,7 +510,7 @@ assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) -assert_type(c8 + c16, np.complex128 | np.complex64) +assert_type(c8 + c16, np.complex128) assert_type(f4 + c16, np.complexfloating) assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) @@ -513,7 +520,7 @@ assert_type(f + c16, np.complex128) assert_type(AR_f + c16, npt.NDArray[np.complex128]) assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) -assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + c16, np.complex128) assert_type(c8 + f8, np.complex64 | np.complex128) assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) @@ -554,16 +561,16 @@ assert_type(f8 + AR_f, npt.NDArray[np.float64]) assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.floating) +assert_type(f4 + f8, np.float64) assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) -assert_type(c + f8, np.complex128 | np.float64) +assert_type(c + f8, complex) assert_type(f + f8, np.float64) assert_type(AR_f + f8, npt.NDArray[np.float64]) assert_type(f4 + f16, np.floating) -assert_type(f4 + f8, np.floating) +assert_type(f4 + f8, np.float64) assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) assert_type(f4 + i4, np.floating) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 
ba8fc4db23c9..910755ab0877 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,17 +1,19 @@ -import sys from collections import deque from pathlib import Path -from typing import Any, Generic, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt +from numpy._typing import _AnyShape -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] -class SubClass(npt.NDArray[_ScalarT_co]): ... +class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... -class IntoSubClass(Generic[_ScalarT_co]): - def __array__(self) -> SubClass[_ScalarT_co]: ... +class IntoSubClass[ScalarT: np.generic]: + def __array__(self) -> SubClass[ScalarT]: ... i8: np.int64 @@ -21,6 +23,20 @@ C: list[int] D: SubClass[np.float64 | np.int64] E: IntoSubClass[np.float64 | np.int64] +_f32_0d: np.float32 +_f32_1d: _Array1D[np.float32] +_f32_2d: _Array2D[np.float32] +_f32_3d: _Array3D[np.float32] + +_py_b_1d: list[bool] +_py_b_2d: list[list[bool]] +_py_i_1d: list[int] +_py_i_2d: list[list[int]] +_py_f_1d: list[float] +_py_f_2d: list[list[float]] +_py_c_1d: list[complex] +_py_c_2d: list[list[complex]] + mixed_shape: tuple[int, np.int64] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
@@ -70,9 +86,24 @@ assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) -assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asarray(C), _Array1D[np.int_]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.asarray(_f32_1d), _Array1D[np.float32]) +assert_type(np.asarray(_f32_1d, dtype=np.float64), _Array1D[np.float64]) +assert_type(np.asarray(_f32_1d, dtype="f8"), _Array1D[Any]) +assert_type(np.asarray(_py_b_1d), _Array1D[np.bool_]) +assert_type(np.asarray(_py_b_2d), _Array2D[np.bool_]) +assert_type(np.asarray(_py_i_1d), _Array1D[np.int_]) +assert_type(np.asarray(_py_i_2d), _Array2D[np.int_]) +assert_type(np.asarray(_py_f_1d), _Array1D[np.float64]) +assert_type(np.asarray(_py_f_2d), _Array2D[np.float64]) +assert_type(np.asarray(_py_c_1d), _Array1D[np.complex128]) +assert_type(np.asarray(_py_c_2d), _Array2D[np.complex128]) +assert_type(np.asarray(_py_i_1d, dtype=np.float32), _Array1D[np.float32]) +assert_type(np.asarray(_py_i_1d, dtype="f4"), _Array1D[Any]) +assert_type(np.asarray(_py_i_2d, dtype=np.float32), _Array2D[np.float32]) +assert_type(np.asarray(_py_i_2d, dtype="f4"), _Array2D[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), SubClass[np.float64]) @@ -85,53 +116,63 @@ assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(_f32_1d), _Array1D[np.float32]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) 
assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) - -assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) -assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) - -assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.asfortranarray(_f32_1d), _Array1D[np.float32]) + +assert_type(np.fromstring("1 1 1", sep=" "), _Array1D[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), _Array1D[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), _Array1D[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), _Array1D[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), _Array1D[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), _Array1D[Any]) with open("test.txt") as f: - assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) - assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) - assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) - -assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) -assert_type(np.fromiter("12345", float), npt.NDArray[Any]) - 
-assert_type(np.frombuffer(A), npt.NDArray[np.float64]) -assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) + assert_type(np.fromfile(f, sep=" "), _Array1D[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), _Array1D[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), _Array1D[np.float64]) + +assert_type(np.fromiter("12345", np.float32), _Array1D[np.float32]) +assert_type(np.fromiter("12345", np.float64), _Array1D[np.float64]) +assert_type(np.fromiter("12345", bool), _Array1D[np.bool]) +assert_type(np.fromiter("12345", int), _Array1D[np.int_ | Any]) +assert_type(np.fromiter("12345", float), _Array1D[np.float64 | Any]) +assert_type(np.fromiter("12345", complex), _Array1D[np.complex128 | Any]) +assert_type(np.fromiter("12345", None), _Array1D[np.float64]) +assert_type(np.fromiter("12345", object), _Array1D[Any]) + +assert_type(np.frombuffer(A), _Array1D[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), _Array1D[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), _Array1D[Any]) _x_bool: bool _x_int: int _x_float: float -_x_timedelta: np.timedelta64 -_x_datetime: np.datetime64 - -assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) -assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) -assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) -assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) -assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) -assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) 
-assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) -assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) +_x_timedelta: np.timedelta64[int] +_x_datetime: np.datetime64[int] + +assert_type(np.arange(False, True), _Array1D[np.int_]) +assert_type(np.arange(10), _Array1D[np.int_]) +assert_type(np.arange(0, 10, step=2), _Array1D[np.int_]) +assert_type(np.arange(10.0), _Array1D[np.float64 | Any]) +assert_type(np.arange(0, stop=10.0), _Array1D[np.float64 | Any]) +assert_type(np.arange(_x_timedelta), _Array1D[np.timedelta64]) +assert_type(np.arange(0, _x_timedelta), _Array1D[np.timedelta64]) +assert_type(np.arange(_x_datetime, _x_datetime), _Array1D[np.datetime64]) +assert_type(np.arange(10, dtype=np.float64), _Array1D[np.float64]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), _Array1D[np.int16]) +assert_type(np.arange(10, dtype=int), _Array1D[np.int_]) +assert_type(np.arange(0, 10, dtype="f8"), _Array1D[Any]) +# https://github.com/numpy/numpy/issues/30628 +assert_type(np.arange("2025-12-20", "2025-12-23", dtype="datetime64[D]"), _Array1D[np.datetime64]) assert_type(np.require(A), npt.NDArray[np.float64]) assert_type(np.require(B), SubClass[np.float64]) @@ -145,23 +186,23 @@ assert_type(np.require(B, requirements="W"), SubClass[np.float64]) assert_type(np.require(B, requirements="A"), SubClass[np.float64]) assert_type(np.require(C), npt.NDArray[Any]) -assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) -assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) -assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10), _Array1D[np.float64]) +assert_type(np.linspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.linspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) 
-assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) -assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) -assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0, 10, retstep=True), tuple[_Array1D[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]) +assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[_Array1D[np.int64], np.int64]) assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) -assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) -assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) -assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10), _Array1D[np.float64]) +assert_type(np.logspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.logspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) -assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) -assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10), _Array1D[np.float64]) +assert_type(np.geomspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.geomspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) assert_type(np.zeros_like(A), npt.NDArray[np.float64]) @@ -234,6 +275,10 @@ assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtyp assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.atleast_1d(_f32_0d), 
_Array1D[np.float32]) +assert_type(np.atleast_1d(_f32_1d), _Array1D[np.float32]) +assert_type(np.atleast_1d(_f32_2d), _Array2D[np.float32]) +assert_type(np.atleast_1d(_f32_3d), _Array3D[np.float32]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) @@ -242,10 +287,18 @@ assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_2d(_f32_0d), _Array2D[np.float32]) +assert_type(np.atleast_2d(_f32_1d), _Array2D[np.float32]) +assert_type(np.atleast_2d(_f32_2d), _Array2D[np.float32]) +assert_type(np.atleast_2d(_f32_3d), _Array3D[np.float32]) assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_3d(_f32_0d), _Array3D[np.float32]) +assert_type(np.atleast_3d(_f32_1d), _Array3D[np.float32]) +assert_type(np.atleast_3d(_f32_2d), _Array3D[np.float32]) +assert_type(np.atleast_3d(_f32_3d), _Array3D[np.float32]) assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) @@ -268,10 +321,9 @@ assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) -if sys.version_info >= (3, 12): - from collections.abc import Buffer +from collections.abc import Buffer - def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... 
+def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... - buffer: Buffer - assert_type(create_array(buffer), npt.NDArray[Any]) +buffer: Buffer +assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index 3d53d913a770..bbf843da0042 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -13,6 +13,8 @@ def mode_func( AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_LIKE: list[int] assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64]) @@ -25,3 +27,6 @@ assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) + +assert_type(np.pad(AR_f8_1d, (2, 3)), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.pad(AR_f8_2d, (2, 3)), np.ndarray[tuple[int, int], np.dtype[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 7e5ca5c5717b..ee74eedd61c7 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -8,6 +8,8 @@ from numpy.lib._arraysetops_impl import ( UniqueInverseResult, ) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + AR_b: npt.NDArray[np.bool] AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] @@ -16,36 +18,40 @@ AR_O: npt.NDArray[np.object_] AR_LIKE_f8: list[float] -assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) -assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) -assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) -assert_type(np.ediff1d(AR_O), 
npt.NDArray[np.object_]) -assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) +assert_type(np.ediff1d(AR_b), _Array1D[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), _Array1D[np.int64]) +assert_type(np.ediff1d(AR_M), _Array1D[np.timedelta64]) +assert_type(np.ediff1d(AR_O), _Array1D[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), _Array1D[Any]) -assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) -assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.intersect1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.intersect1d(AR_f8, AR_i8), _Array1D[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), - tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], + tuple[_Array1D[np.float64], _Array1D[np.intp], _Array1D[np.intp]], ) -assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) -assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.setxor1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.setxor1d(AR_f8, AR_i8), _Array1D[Any]) assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) -assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) -assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.union1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.union1d(AR_M, AR_M), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.union1d(AR_f8, AR_i8), _Array1D[Any]) -assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) -assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.setdiff1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] +assert_type(np.setdiff1d(AR_f8, AR_i8), _Array1D[Any]) assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) @@ -70,5 +76,5 @@ assert_type(np.unique_counts(AR_f8), UniqueCountsResult[np.float64]) assert_type(np.unique_counts(AR_LIKE_f8), UniqueCountsResult[Any]) assert_type(np.unique_inverse(AR_f8), UniqueInverseResult[np.float64]) assert_type(np.unique_inverse(AR_LIKE_f8), UniqueInverseResult[Any]) -assert_type(np.unique_values(AR_f8), npt.NDArray[np.float64]) -assert_type(np.unique_values(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.unique_values(AR_f8), _Array1D[np.float64]) +assert_type(np.unique_values(AR_LIKE_f8), _Array1D[Any]) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 49986bd5d12c..809f77d9736d 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,10 +1,10 @@ -from typing import Literal as L, TypeAlias, assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -FalseType: TypeAlias = L[False] -TrueType: TypeAlias = L[True] +type FalseType = L[False] +type TrueType = L[True] i4: np.int32 i8: np.int64 diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 2fba2feae385..725807b9e8e3 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type 
AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] @@ -162,48 +162,51 @@ assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) -assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_U), np.ndarray) +assert_type(np.char.isalpha(AR_S), np.ndarray) +assert_type(np.char.isalpha(AR_T), np.ndarray) -assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_U), np.ndarray) +assert_type(np.char.isalnum(AR_S), np.ndarray) +assert_type(np.char.isalnum(AR_T), np.ndarray) -assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isdecimal(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isdecimal(AR_U), np.ndarray) +assert_type(np.char.isdecimal(AR_T), np.ndarray) -assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_U), np.ndarray) +assert_type(np.char.isdigit(AR_S), np.ndarray) +assert_type(np.char.isdigit(AR_T), np.ndarray) -assert_type(np.char.islower(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.islower(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.islower(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_U), np.ndarray) +assert_type(np.char.islower(AR_S), np.ndarray) +assert_type(np.char.islower(AR_T), np.ndarray) -assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool]) 
+assert_type(np.char.isnumeric(AR_U), np.ndarray) +assert_type(np.char.isnumeric(AR_T), np.ndarray) -assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_U), np.ndarray) +assert_type(np.char.isspace(AR_S), np.ndarray) +assert_type(np.char.isspace(AR_T), np.ndarray) -assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_U), np.ndarray) +assert_type(np.char.istitle(AR_S), np.ndarray) +assert_type(np.char.istitle(AR_T), np.ndarray) -assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_U), np.ndarray) +assert_type(np.char.isupper(AR_S), np.ndarray) +assert_type(np.char.isupper(AR_T), np.ndarray) -assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) -assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) -assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_U), np.ndarray) +assert_type(np.char.str_len(AR_S), np.ndarray) +assert_type(np.char.str_len(AR_T), np.ndarray) assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.char.translate(AR_T, ""), AR_T_alias) +# mypy: disable-error-code="deprecated" +# pyright: reportDeprecated=false + assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi 
b/numpy/typing/tests/data/reveal/chararray.pyi index 5c3dc85038db..771276e41110 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] -_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] # type: ignore[deprecated] +type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] AR_U: _StrCharArray AR_S: _BytesCharArray diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 6df5a3d94314..f9e2a7ee0519 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -21,7 +21,7 @@ td = np.timedelta64(0, "D") b_ = np.bool() b = False -c = complex() +c = 0j f = 0.0 i = 0 @@ -42,15 +42,15 @@ assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) assert_type(dt > dt, np.bool) assert_type(td > td, np.bool) -assert_type(td > i, np.bool) -assert_type(td > i4, np.bool) -assert_type(td > i8, np.bool) +assert_type(td > i, np.bool) # type: ignore[deprecated] +assert_type(td > i4, np.bool) # type: ignore[deprecated] +assert_type(td > i8, np.bool) # type: ignore[deprecated] -assert_type(td > AR, npt.NDArray[np.bool]) -assert_type(td > SEQ, npt.NDArray[np.bool]) +assert_type(td > AR, npt.NDArray[np.bool]) # type: ignore[deprecated] +assert_type(td > SEQ, npt.NDArray[np.bool]) # type: ignore[deprecated] assert_type(AR > SEQ, npt.NDArray[np.bool]) assert_type(AR > td, npt.NDArray[np.bool]) -assert_type(SEQ > td, npt.NDArray[np.bool]) +assert_type(SEQ > td, npt.NDArray[np.bool]) # type: ignore[deprecated] assert_type(SEQ > AR, 
npt.NDArray[np.bool]) # boolean diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index 0564d725cf62..e3558925e4d0 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -6,19 +6,17 @@ import numpy.typing as npt from numpy import ctypeslib AR_bool: npt.NDArray[np.bool] -AR_ubyte: npt.NDArray[np.ubyte] -AR_ushort: npt.NDArray[np.ushort] -AR_uintc: npt.NDArray[np.uintc] -AR_ulong: npt.NDArray[np.ulong] -AR_ulonglong: npt.NDArray[np.ulonglong] -AR_byte: npt.NDArray[np.byte] -AR_short: npt.NDArray[np.short] -AR_intc: npt.NDArray[np.intc] -AR_long: npt.NDArray[np.long] -AR_longlong: npt.NDArray[np.longlong] -AR_single: npt.NDArray[np.single] -AR_double: npt.NDArray[np.double] -AR_longdouble: npt.NDArray[np.longdouble] +AR_i8: npt.NDArray[np.int8] +AR_u8: npt.NDArray[np.uint8] +AR_i16: npt.NDArray[np.int16] +AR_u16: npt.NDArray[np.uint16] +AR_i32: npt.NDArray[np.int32] +AR_u32: npt.NDArray[np.uint32] +AR_i64: npt.NDArray[np.int64] +AR_u64: npt.NDArray[np.uint64] +AR_f32: npt.NDArray[np.float32] +AR_f64: npt.NDArray[np.float64] +AR_f80: npt.NDArray[np.longdouble] AR_void: npt.NDArray[np.void] pointer: ct._Pointer[Any] @@ -33,49 +31,56 @@ assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._con assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) 
-assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) -assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.int8), type[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint8), type[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes_type(np.int16), type[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint16), type[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes_type(np.int32), type[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint32), type[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes_type(np.int64), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint64), type[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes_type(np.float32), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.float64), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.longdouble), type[ct.c_longdouble]) +assert_type(np.ctypeslib.as_ctypes_type("?"), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type("intp"), type[ct.c_ssize_t]) assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[ct.c_double]) assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) -assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) -assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) +assert_type(np.ctypeslib.as_ctypes(AR_u8.take(0)), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(AR_u16.take(0)), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(AR_u32.take(0)), ct.c_uint32) -assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), 
ct.c_byte) -assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) -assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) -assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) -assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) -assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(np.bool()), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(np.int8()), ct.c_int8) +assert_type(np.ctypeslib.as_ctypes(np.uint8()), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(np.int16()), ct.c_int16) +assert_type(np.ctypeslib.as_ctypes(np.uint16()), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(np.int32()), ct.c_int32) +assert_type(np.ctypeslib.as_ctypes(np.uint32()), ct.c_uint32) +assert_type(np.ctypeslib.as_ctypes(np.int64()), ct.c_int64) +assert_type(np.ctypeslib.as_ctypes(np.uint64()), ct.c_uint64) +assert_type(np.ctypeslib.as_ctypes(np.float32()), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(np.float64()), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(np.longdouble()), ct.c_longdouble) +assert_type(np.ctypeslib.as_ctypes(np.void(b"")), Any) assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_i8), ct.Array[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes(AR_u8), ct.Array[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes(AR_i16), ct.Array[ct.c_int16]) 
+assert_type(np.ctypeslib.as_ctypes(AR_u16), ct.Array[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes(AR_i32), ct.Array[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes(AR_u32), ct.Array[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes(AR_i64), ct.Array[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes(AR_u64), ct.Array[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes(AR_f32), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_f64), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_f80), ct.Array[ct.c_longdouble]) assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) -assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(AR_u8), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) - -assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) -assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 7f670b3be51c..c8c9e393f76e 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -2,13 +2,13 @@ import ctypes as ct import datetime as dt from decimal import Decimal from fractions import Fraction -from typing import Any, Literal, LiteralString, TypeAlias, assert_type +from typing import Any, Literal, LiteralString, assert_type import numpy as np from numpy.dtypes import StringDType # a combination of likely `object` dtype-like candidates (no `_co`) -_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta +type _PyObjectLike = Decimal | Fraction | 
dt.datetime | dt.timedelta dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] @@ -44,9 +44,9 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) +assert_type(np.dtype(int), np.dtype[np.int_ | Any]) +assert_type(np.dtype(float), np.dtype[np.float64 | Any]) +assert_type(np.dtype(complex), np.dtype[np.complex128 | Any]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) @@ -64,10 +64,44 @@ assert_type(np.dtype(Fraction), np.dtype[np.object_]) assert_type(np.dtype("?"), np.dtype[np.bool]) assert_type(np.dtype("|b1"), np.dtype[np.bool]) assert_type(np.dtype("u1"), np.dtype[np.uint8]) -assert_type(np.dtype("l"), np.dtype[np.long]) +assert_type(np.dtype("l"), np.dtype[np.int32 | np.int64]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) +# char-codes - datetime64 +assert_type(np.dtype("datetime64[Y]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[M]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[W]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[D]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[h]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[m]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[s]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[ms]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[us]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[ns]"), 
np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[ps]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[fs]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[as]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64"), np.dtype[np.datetime64]) +assert_type(np.dtype("M8"), np.dtype[np.datetime64]) +assert_type(np.dtype("M"), np.dtype[np.datetime64]) +# char-codes - timedelta64 +assert_type(np.dtype("timedelta64[Y]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[M]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[W]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[D]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[h]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[m]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[s]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[ms]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[us]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[ns]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[ps]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[fs]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[as]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64"), np.dtype[np.timedelta64]) +assert_type(np.dtype("m8"), np.dtype[np.timedelta64]) +assert_type(np.dtype("m"), np.dtype[np.timedelta64]) # ctypes assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi index dacd2b89777c..c5875209603f 100644 --- a/numpy/typing/tests/data/reveal/fft.pyi +++ b/numpy/typing/tests/data/reveal/fft.pyi @@ -3,35 +3,187 @@ from typing import Any, assert_type import numpy 
as np import numpy.typing as npt -AR_f8: npt.NDArray[np.float64] -AR_c16: npt.NDArray[np.complex128] -AR_LIKE_f8: list[float] - -assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64]) -assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) - -assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64]) -assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) - -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128]) -assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64]) -assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64]) -assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128]) - -assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64]) - -assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64]) +### + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +_f64_nd: npt.NDArray[np.float64] +_c128_nd: npt.NDArray[np.complex128] +_py_float_1d: list[float] +_py_complex_1d: list[complex] + +_i64: np.int64 +_f32: np.float16 +_f80: np.longdouble +_c64: np.complex64 +_c160: np.clongdouble + +_i64_2d: 
_Array2D[np.int64] +_f16_2d: _Array2D[np.float16] +_f32_2d: _Array2D[np.float32] +_f80_2d: _Array2D[np.longdouble] +_c64_2d: _Array2D[np.complex64] +_c160_2d: _Array2D[np.clongdouble] + +_i64_nd: npt.NDArray[np.int64] +_f32_nd: npt.NDArray[np.float32] +_f80_nd: npt.NDArray[np.longdouble] +_c64_nd: npt.NDArray[np.complex64] +_c160_nd: npt.NDArray[np.clongdouble] + +### + +# fftshift + +assert_type(np.fft.fftshift(_py_float_1d, axes=0), npt.NDArray[Any]) +assert_type(np.fft.fftshift(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.fftshift(_f64_nd), npt.NDArray[np.float64]) + +# ifftshift + +assert_type(np.fft.ifftshift(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.ifftshift(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.ifftshift(_py_float_1d, axes=0), npt.NDArray[Any]) + +# fftfreq + +assert_type(np.fft.fftfreq(5), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, True), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1.0), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1j), _Array1D[np.complex128 | Any]) + +assert_type(np.fft.fftfreq(5, _i64), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, _f32), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, _f80), _Array1D[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64), _Array1D[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160), _Array1D[np.clongdouble]) + +assert_type(np.fft.fftfreq(5, _i64_2d), _Array2D[np.float64]) +assert_type(np.fft.fftfreq(5, _f32_2d), _Array2D[np.float64]) +assert_type(np.fft.fftfreq(5, _f80_2d), _Array2D[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160_2d), _Array2D[np.clongdouble]) + +assert_type(np.fft.fftfreq(5, _i64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftfreq(5, _f32_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64_nd), 
npt.NDArray[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) + +# rfftfreq (same as fftfreq) + +assert_type(np.fft.rfftfreq(5), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, True), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1.0), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1j), _Array1D[np.complex128 | Any]) + +assert_type(np.fft.rfftfreq(5, _i64), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80), _Array1D[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64), _Array1D[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160), _Array1D[np.clongdouble]) + +assert_type(np.fft.rfftfreq(5, _i64_2d), _Array2D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32_2d), _Array2D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80_2d), _Array2D[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160_2d), _Array2D[np.clongdouble]) + +assert_type(np.fft.rfftfreq(5, _i64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32_nd), npt.NDArray[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) + +# *fft + +assert_type(np.fft.fft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.ifft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ifft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft(_c64_2d), _Array2D[np.complex64]) 
+assert_type(np.fft.ifft(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.rfft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.rfft(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.irfft(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.irfft(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.irfft(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.irfft(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft(_py_complex_1d), _Array1D[np.float64]) + +assert_type(np.fft.hfft(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.hfft(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.hfft(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.hfft(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.hfft(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.hfft(_py_complex_1d), _Array1D[np.float64]) + +assert_type(np.fft.ihfft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ihfft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ihfft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ihfft(_py_float_1d), _Array1D[np.complex128]) + +# *fftn + +assert_type(np.fft.fftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fftn(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fftn(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.fftn(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.fftn(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.ifftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifftn(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ifftn(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifftn(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifftn(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.rfftn(_f64_nd), npt.NDArray[np.complex128]) 
+assert_type(np.fft.rfftn(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfftn(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.rfftn(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.irfftn(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.irfftn(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.irfftn(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.irfftn(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.irfftn(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.irfftn(_py_complex_1d), _Array1D[np.float64]) + +# *fft2 + +assert_type(np.fft.fft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fft2(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft2(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft2(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.ifft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft2(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ifft2(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft2(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft2(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.rfft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft2(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfft2(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.rfft2(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.irfft2(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.irfft2(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.irfft2(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.irfft2(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft2(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft2(_py_complex_1d), _Array1D[np.float64]) diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 98d61a6d3428..4907f8464cf2 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi 
+++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np -_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtypes.Int8DType] +type _ArrayND = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +type _Array1D = np.ndarray[tuple[int], np.dtypes.BytesDType] +type _Array2D = np.ndarray[tuple[int, int], np.dtypes.Int8DType] _a_nd: np.flatiter[_ArrayND] _a_1d: np.flatiter[_Array1D] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 58ea2c5f8732..af473fd41305 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,20 +1,27 @@ """Tests for :mod:`_core.fromnumeric`.""" +import datetime as dt from typing import Any, assert_type import numpy as np import numpy.typing as npt -class NDArraySubclass(npt.NDArray[np.complex128]): ... +class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... 
AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] AR_c16: npt.NDArray[np.complex128] AR_u8: npt.NDArray[np.uint64] AR_i8: npt.NDArray[np.int64] AR_O: npt.NDArray[np.object_] AR_subclass: NDArraySubclass -AR_m: npt.NDArray[np.timedelta64] +AR_m_ns: npt.NDArray[np.timedelta64[int]] +AR_m_s: npt.NDArray[np.timedelta64[dt.timedelta]] +AR_m_nat: npt.NDArray[np.timedelta64[None]] +AR_M_ns: npt.NDArray[np.datetime64[int]] AR_0d: np.ndarray[tuple[()]] AR_1d: np.ndarray[tuple[int]] AR_nd: np.ndarray @@ -24,8 +31,15 @@ f4: np.float32 i8: np.int64 f: float +_py_list_1d: list[int] +_py_list_2d: list[list[int]] +_py_list_3d: list[list[list[int]]] + +_dtype_list: list[np.dtype] +_any_list: list[Any] + # integer‑dtype subclass for argmin/argmax -class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +class NDArrayIntSubclass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... 
AR_sub_i: NDArrayIntSubclass assert_type(np.take(b, 0), np.bool) @@ -68,36 +82,66 @@ assert_type(np.transpose(f4), npt.NDArray[np.float32]) assert_type(np.transpose(f), npt.NDArray[Any]) assert_type(np.transpose(AR_b), npt.NDArray[np.bool]) assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) +assert_type(np.transpose(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.transpose(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) -assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool]) -assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) -assert_type(np.partition(f, 0, axis=None), npt.NDArray[Any]) assert_type(np.partition(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.partition(AR_b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.partition(AR_f4, 0), npt.NDArray[np.float32]) +assert_type(np.partition(AR_f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.partition(AR_f4_1d, 0), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.partition(AR_f4_2d, 0), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.partition(AR_f4_3d, 0), np.ndarray[tuple[int, int, int], np.dtype[np.float32]]) assert_type(np.argpartition(b, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) - -assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) -assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) -assert_type(np.sort(AR_f4, 0), npt.NDArray[np.float32]) - -assert_type(np.argsort(AR_b, 0), npt.NDArray[np.intp]) -assert_type(np.argsort(AR_f4, 0), npt.NDArray[np.intp]) - -assert_type(np.argmax(AR_b), np.intp) +assert_type(np.argpartition(b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(f4, 0, axis=None), 
np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(f, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(AR_b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(AR_f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) + +assert_type(np.sort([2, 1]), npt.NDArray[Any]) +assert_type(np.sort(AR_b), npt.NDArray[np.bool]) +assert_type(np.sort(AR_f4), npt.NDArray[np.float32]) +assert_type(np.sort(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sort(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.sort([2, 1], axis=None), np.ndarray[tuple[int]]) +assert_type(np.sort(AR_b, axis=None), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.sort(AR_f4, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sort(AR_f4_1d, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sort(AR_f4_2d, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.argsort([2, 1]), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_b), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argsort([2, 1], axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_b, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4_1d, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4_2d, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# same as below assert_type(np.argmax(AR_f4), np.intp) -assert_type(np.argmax(AR_b, axis=0), Any) -assert_type(np.argmax(AR_f4, axis=0), Any) +assert_type(np.argmax(AR_f4, axis=0), 
npt.NDArray[np.intp]) +assert_type(np.argmax(AR_f4, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.argmax(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argmax(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argmax(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) -assert_type(np.argmin(AR_b), np.intp) +# same as above assert_type(np.argmin(AR_f4), np.intp) -assert_type(np.argmin(AR_b, axis=0), Any) -assert_type(np.argmin(AR_f4, axis=0), Any) +assert_type(np.argmin(AR_f4, axis=0), npt.NDArray[np.intp]) +assert_type(np.argmin(AR_f4, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.argmin(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argmin(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argmin(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.searchsorted(AR_b[0], 0), np.intp) @@ -119,6 +163,8 @@ assert_type(np.squeeze(AR_f4), npt.NDArray[np.float32]) assert_type(np.diagonal(AR_b), npt.NDArray[np.bool]) assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.diagonal(AR_f4_3d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) @@ -131,16 +177,21 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) -assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], 
...]) -assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) -assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + +assert_type(np.nonzero(AR_f4), tuple[_Int1D, ...]) +assert_type(np.nonzero(AR_f4_1d), tuple[_Int1D]) +assert_type(np.nonzero(AR_f4_2d), tuple[_Int1D, _Int1D]) +assert_type(np.nonzero(AR_f4_3d), tuple[_Int1D, _Int1D, _Int1D]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) assert_type(np.shape([1]), tuple[int]) assert_type(np.shape([[2]]), tuple[int, int]) -assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape([[[3]]]), tuple[int, int, int]) +assert_type(np.shape(_py_list_1d), tuple[int]) +assert_type(np.shape(_py_list_2d), tuple[int, int]) +assert_type(np.shape(_py_list_3d), tuple[int, int, int]) assert_type(np.shape(AR_b), tuple[Any, ...]) assert_type(np.shape(AR_nd), tuple[Any, ...]) # these fail on mypy, but it works as expected with pyright/pylance @@ -161,31 +212,20 @@ assert_type(np.clip(AR_b, 0, 1), npt.NDArray[np.bool]) assert_type(np.clip(AR_f4, 0, 1), npt.NDArray[np.float32]) assert_type(np.clip([0], 0, 1), npt.NDArray[Any]) assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) - -assert_type(np.sum(b), np.bool) -assert_type(np.sum(f4), np.float32) -assert_type(np.sum(f), Any) -assert_type(np.sum(AR_b), np.bool) -assert_type(np.sum(AR_f4), np.float32) -assert_type(np.sum(AR_b, axis=0), Any) -assert_type(np.sum(AR_f4, axis=0), Any) -assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) -assert_type(np.sum(AR_f4, None, np.float64), np.float64) -assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) -assert_type(np.sum(AR_f4, None, np.float64, keepdims=False), np.float64) -assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) 
-assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) +assert_type(np.clip(AR_f4_1d, 0, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.all(b), np.bool) assert_type(np.all(f4), np.bool) assert_type(np.all(f), np.bool) assert_type(np.all(AR_b), np.bool) assert_type(np.all(AR_f4), np.bool) -assert_type(np.all(AR_b, axis=0), Any) -assert_type(np.all(AR_f4, axis=0), Any) -assert_type(np.all(AR_b, keepdims=True), Any) -assert_type(np.all(AR_f4, keepdims=True), Any) +assert_type(np.all(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.all(AR_f4, axis=0), npt.NDArray[np.bool]) +assert_type(np.all(AR_b, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.all(AR_f4, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.all(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.all(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.all(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(np.all(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.any(b), np.bool) @@ -193,95 +233,135 @@ assert_type(np.any(f4), np.bool) assert_type(np.any(f), np.bool) assert_type(np.any(AR_b), np.bool) assert_type(np.any(AR_f4), np.bool) -assert_type(np.any(AR_b, axis=0), Any) -assert_type(np.any(AR_f4, axis=0), Any) -assert_type(np.any(AR_b, keepdims=True), Any) -assert_type(np.any(AR_f4, keepdims=True), Any) +assert_type(np.any(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.any(AR_f4, axis=0), npt.NDArray[np.bool]) +assert_type(np.any(AR_b, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.any(AR_f4, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.any(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.any(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.any(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], 
np.dtype[np.bool]]) assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumsum(b), npt.NDArray[np.bool]) -assert_type(np.cumsum(f4), npt.NDArray[np.float32]) -assert_type(np.cumsum(f), npt.NDArray[Any]) -assert_type(np.cumsum(AR_b), npt.NDArray[np.bool]) -assert_type(np.cumsum(AR_f4), npt.NDArray[np.float32]) -assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) -assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumsum(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumsum(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumsum(f), np.ndarray[tuple[int]]) +assert_type(np.cumsum(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumsum(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumsum(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumsum(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumsum(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumsum(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) -assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) -assert_type(np.cumulative_sum(f), npt.NDArray[Any]) -assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) -assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) -assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) -assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumulative_sum(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_sum(f), np.ndarray[tuple[int]]) +assert_type(np.cumulative_sum(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumulative_sum(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), 
np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_sum(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumulative_sum(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.ptp(b), np.bool) -assert_type(np.ptp(f4), np.float32) -assert_type(np.ptp(f), Any) -assert_type(np.ptp(AR_b), np.bool) +assert_type(np.ptp(AR_i8), np.int64) +assert_type(np.ptp(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.ptp(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.ptp(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) assert_type(np.ptp(AR_f4), np.float32) -assert_type(np.ptp(AR_b, axis=0), Any) -assert_type(np.ptp(AR_f4, axis=0), Any) -assert_type(np.ptp(AR_b, keepdims=True), Any) -assert_type(np.ptp(AR_f4, keepdims=True), Any) +assert_type(np.ptp(AR_c16), np.complex128) +assert_type(np.ptp(AR_O), Any) +assert_type(np.ptp(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.ptp(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.ptp(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.ptp(AR_m_ns), np.timedelta64[int]) +assert_type(np.ptp(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.ptp(AR_m_nat), np.timedelta64[None]) +assert_type(np.ptp(AR_M_ns), np.timedelta64) +assert_type(np.ptp(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.ptp(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.ptp(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.ptp(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.ptp(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.ptp(AR_nd), Any) -assert_type(np.amax(b), np.bool) -assert_type(np.amax(f4), np.float32) -assert_type(np.amax(f), Any) +# same as below +assert_type(np.amax(AR_i8), 
np.int64) +assert_type(np.amax(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.amax(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.amax(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) assert_type(np.amax(AR_b), np.bool) assert_type(np.amax(AR_f4), np.float32) -assert_type(np.amax(AR_b, axis=0), Any) -assert_type(np.amax(AR_f4, axis=0), Any) -assert_type(np.amax(AR_b, keepdims=True), Any) -assert_type(np.amax(AR_f4, keepdims=True), Any) +assert_type(np.amax(AR_c16), np.complex128) +assert_type(np.amax(AR_O), Any) +assert_type(np.amax(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.amax(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amax(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amax(AR_m_ns), np.timedelta64[int]) +assert_type(np.amax(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.amax(AR_m_nat), np.timedelta64[None]) +assert_type(np.amax(AR_M_ns), np.datetime64[int]) +assert_type(np.amax(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.amax(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.amax(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.amax(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.amax(AR_f4, out=AR_subclass), NDArraySubclass) - -assert_type(np.amin(b), np.bool) -assert_type(np.amin(f4), np.float32) -assert_type(np.amin(f), Any) +assert_type(np.amax(AR_nd), Any) +assert_type(np.amax(AR_nd, axis=1), np.ndarray) +assert_type(np.amax(AR_nd, keepdims=True), np.ndarray) +assert_type(np.amax(_dtype_list), Any) +assert_type(np.amax(_dtype_list, axis=1), npt.NDArray[Any]) +assert_type(np.amax(_dtype_list, keepdims=True), npt.NDArray[Any]) +assert_type(np.amax(_any_list), Any) + +# same as above +assert_type(np.amin(AR_i8), np.int64) +assert_type(np.amin(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.amin(AR_i8, keepdims=True), npt.NDArray[np.int64]) 
+assert_type(np.amin(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) assert_type(np.amin(AR_b), np.bool) assert_type(np.amin(AR_f4), np.float32) -assert_type(np.amin(AR_b, axis=0), Any) -assert_type(np.amin(AR_f4, axis=0), Any) -assert_type(np.amin(AR_b, keepdims=True), Any) -assert_type(np.amin(AR_f4, keepdims=True), Any) +assert_type(np.amin(AR_c16), np.complex128) +assert_type(np.amin(AR_O), Any) +assert_type(np.amin(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.amin(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amin(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amin(AR_m_ns), np.timedelta64[int]) +assert_type(np.amin(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.amin(AR_m_nat), np.timedelta64[None]) +assert_type(np.amin(AR_M_ns), np.datetime64[int]) +assert_type(np.amin(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.amin(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.amin(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.amin(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) - -assert_type(np.prod(AR_b), np.int_) -assert_type(np.prod(AR_u8), np.uint64) -assert_type(np.prod(AR_i8), np.int64) -assert_type(np.prod(AR_f4), np.floating) -assert_type(np.prod(AR_c16), np.complexfloating) -assert_type(np.prod(AR_O), Any) -assert_type(np.prod(AR_f4, axis=0), Any) -assert_type(np.prod(AR_f4, keepdims=True), Any) -assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) -assert_type(np.prod(AR_f4, dtype=float), Any) -assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) - -assert_type(np.cumprod(AR_b), npt.NDArray[np.int_]) -assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64]) -assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating]) -assert_type(np.cumprod(AR_c16), 
npt.NDArray[np.complexfloating]) -assert_type(np.cumprod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating]) -assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.amin(AR_nd), Any) +assert_type(np.amin(AR_nd, axis=1), np.ndarray) +assert_type(np.amin(AR_nd, keepdims=True), np.ndarray) +assert_type(np.amin(_dtype_list), Any) +assert_type(np.amin(_dtype_list, axis=1), npt.NDArray[Any]) +assert_type(np.amin(_dtype_list, keepdims=True), npt.NDArray[Any]) + +assert_type(np.cumprod(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumprod(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumprod(f), np.ndarray[tuple[int]]) +assert_type(np.cumprod(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumprod(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumprod(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumprod(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumprod(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) -assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) -assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating]) -assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating]) -assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_prod(b), np.ndarray[tuple[int], np.dtype[np.bool]]) 
+assert_type(np.cumulative_prod(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_prod(f), np.ndarray[tuple[int]]) +assert_type(np.cumulative_prod(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumulative_prod(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumulative_prod(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumulative_prod(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumulative_prod(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.ndim(b), int) @@ -305,43 +385,162 @@ assert_type(np.around(AR_i8), npt.NDArray[np.int64]) assert_type(np.around(AR_f4), npt.NDArray[np.float32]) assert_type(np.around([1.5]), npt.NDArray[Any]) assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.around(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.mean(AR_b), np.floating) -assert_type(np.mean(AR_i8), np.floating) -assert_type(np.mean(AR_f4), np.floating) -assert_type(np.mean(AR_m), np.timedelta64) -assert_type(np.mean(AR_c16), np.complexfloating) -assert_type(np.mean(AR_O), Any) -assert_type(np.mean(AR_f4, axis=0), Any) -assert_type(np.mean(AR_f4, keepdims=True), Any) +assert_type(np.prod(AR_nd), Any) +assert_type(np.prod(AR_b), np.int_) +assert_type(np.prod(AR_i8), np.int64) +assert_type(np.prod(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.prod(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.prod(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.prod(AR_f4), np.float32) +assert_type(np.prod(AR_c16), np.complex128) +assert_type(np.prod(AR_O), Any) +assert_type(np.prod(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.prod(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.prod(AR_O, axis=0, 
keepdims=True), npt.NDArray[np.object_]) +assert_type(np.prod(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.prod(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.prod(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.prod(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.prod(AR_f4, dtype=float), Any) +assert_type(np.prod(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.prod(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.prod(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) +assert_type(np.prod(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.prod(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.prod(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.prod(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) + +# same as above (but with `timedelta64`) +assert_type(np.sum(AR_nd), Any) +assert_type(np.sum(AR_b), np.int_) +assert_type(np.sum(AR_i8), np.int64) +assert_type(np.sum(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.sum(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.sum(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.sum(AR_f4), np.float32) +assert_type(np.sum(AR_c16), np.complex128) +assert_type(np.sum(AR_O), Any) +assert_type(np.sum(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.sum(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.sum(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.sum(AR_m_ns), np.timedelta64[int]) +assert_type(np.sum(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.sum(AR_m_nat), 
np.timedelta64[None]) +assert_type(np.sum(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.sum(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.sum(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sum(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.sum(AR_f4, dtype=float), Any) +assert_type(np.sum(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.sum(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.sum(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.sum(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.sum(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) + +# +assert_type(np.mean(AR_b), np.float64) +assert_type(np.mean(AR_i8), np.float64) +assert_type(np.mean(AR_i8, axis=0), npt.NDArray[np.float64]) +assert_type(np.mean(AR_i8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.mean(AR_i8, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4), np.float32) +assert_type(np.mean(AR_c16), np.complex128) +assert_type(np.mean(AR_O), np.float64) +assert_type(np.mean(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.mean(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.mean(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.mean(AR_m_ns), np.timedelta64[int]) +assert_type(np.mean(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.mean(AR_m_nat), np.timedelta64[None]) +assert_type(np.mean(AR_f4, axis=0), 
npt.NDArray[np.float32]) +assert_type(np.mean(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.mean(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.mean(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.mean(AR_f4, dtype=float), Any) +assert_type(np.mean(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.mean(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.mean(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) -assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) -assert_type(np.mean(AR_f4, None, np.float64), np.float64) assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64) -assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64) -assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) -assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) - -assert_type(np.std(AR_b), np.floating) -assert_type(np.std(AR_i8), np.floating) -assert_type(np.std(AR_f4), np.floating) -assert_type(np.std(AR_c16), np.floating) -assert_type(np.std(AR_O), Any) -assert_type(np.std(AR_f4, axis=0), Any) -assert_type(np.std(AR_f4, keepdims=True), Any) +assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.mean(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) + +# same as above +assert_type(np.std(AR_b), np.float64) +assert_type(np.std(AR_i8), np.float64) +assert_type(np.std(AR_i8, axis=0), npt.NDArray[np.float64]) +assert_type(np.std(AR_i8, keepdims=True), 
npt.NDArray[np.float64]) +assert_type(np.std(AR_i8, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.std(AR_f4), np.float32) +assert_type(np.std(AR_c16), np.complex128) +assert_type(np.std(AR_O), np.float64) +assert_type(np.std(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.std(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.std(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.std(AR_m_ns), np.timedelta64[int]) +assert_type(np.std(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.std(AR_m_nat), np.timedelta64[None]) +assert_type(np.std(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.std(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.std(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.std(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.std(AR_f4, dtype=float), Any) +assert_type(np.std(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.std(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.std(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.std(AR_f4, dtype=np.float64), np.float64) assert_type(np.std(AR_f4, dtype=np.float64), np.float64) +assert_type(np.std(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.std(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.std(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.std(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.var(AR_b), np.floating) -assert_type(np.var(AR_i8), np.floating) -assert_type(np.var(AR_f4), np.floating) -assert_type(np.var(AR_c16), np.floating) -assert_type(np.var(AR_O), Any) -assert_type(np.var(AR_f4, axis=0), Any) -assert_type(np.var(AR_f4, keepdims=True), 
Any) +# same as above +assert_type(np.var(AR_b), np.float64) +assert_type(np.var(AR_i8), np.float64) +assert_type(np.var(AR_i8, axis=0), npt.NDArray[np.float64]) +assert_type(np.var(AR_i8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.var(AR_i8, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.var(AR_f4), np.float32) +assert_type(np.var(AR_c16), np.complex128) +assert_type(np.var(AR_O), np.float64) +assert_type(np.var(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.var(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.var(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.var(AR_m_ns), np.timedelta64[int]) +assert_type(np.var(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.var(AR_m_nat), np.timedelta64[None]) +assert_type(np.var(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.var(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.var(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.var(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.var(AR_f4, dtype=float), Any) +assert_type(np.var(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.var(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.var(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.var(AR_f4, dtype=np.float64), np.float64) assert_type(np.var(AR_f4, dtype=np.float64), np.float64) +assert_type(np.var(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.var(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.var(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.var(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) + +### + +data: npt.NDArray[np.void] +polygons = data["vectors"].sum(axis=1) 
diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index c1c63d59cb88..278961247698 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -3,23 +3,67 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +AR_i4: npt.NDArray[np.int32] AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] + +list_i: list[int] +list_f: list[float] +list_c: list[complex] + +### + +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_f4), _Array1D[np.float32]) +assert_type(np.histogram_bin_edges(AR_f8), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_c8), _Array1D[np.complex64]) +assert_type(np.histogram_bin_edges(AR_c16), _Array1D[np.complex128]) +assert_type(np.histogram_bin_edges(list_i), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(list_f), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(list_c), _Array1D[np.complex128]) + +assert_type(np.histogram(AR_i8, bins="auto"), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float64]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[_Array1D[np.float64], _Array1D[np.float64]]) +assert_type(np.histogram(AR_f4), tuple[_Array1D[np.intp], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f8), 
tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_c8), tuple[_Array1D[np.intp], _Array1D[np.complex64]]) +assert_type(np.histogram(AR_c16), tuple[_Array1D[np.intp], _Array1D[np.complex128]]) +assert_type(np.histogram(list_i), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(list_f), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(list_c), tuple[_Array1D[np.intp], _Array1D[np.complex128]]) +assert_type(np.histogram(AR_f4, density=True), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_i4), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_f4), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_c8), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_c16), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_i), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_f), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_c), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_i4), tuple[_Array1D[np.int32], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_f4), tuple[_Array1D[np.float32], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_c8), tuple[_Array1D[np.complex64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_c16), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) 
+assert_type(np.histogram(AR_f4, weights=list_i), tuple[_Array1D[np.intp], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_f), tuple[_Array1D[Any], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_c), tuple[_Array1D[Any], _Array1D[np.float32]]) -assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) -assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) -assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) - -assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[npt.NDArray[Any], npt.NDArray[Any]]) - -assert_type(np.histogramdd(AR_i8, bins=[1]), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_i8, weights=AR_f8), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_f8, density=True), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, bins=[1]), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_f8, density=True), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i4), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8), 
tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_f4), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float32], ...]]) +assert_type(np.histogramdd(AR_c8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.complex64], ...]]) +assert_type(np.histogramdd(AR_c16), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.complex128], ...]]) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index f6067c3bed6b..6567deb9c3f4 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -7,12 +7,21 @@ import numpy.typing as npt AR_LIKE_b: list[bool] AR_LIKE_i: list[int] AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_S: list[bytes] AR_LIKE_U: list[str] AR_LIKE_O: list[object] AR_i8: npt.NDArray[np.int64] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] +AR_i8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.int64]] +AR_i8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.int64]] +AR_i8_5d: np.ndarray[tuple[int, int, int, int, int], np.dtype[np.int64]] +AR_f4: npt.NDArray[np.float32] AR_O: npt.NDArray[np.object_] +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) @@ -58,13 +67,82 @@ assert_type(np.s_[0:1], slice[int, int, None]) assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) -assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) -assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.ix_(AR_i8), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.ix_(AR_LIKE_b), tuple[np.ndarray[tuple[int], 
np.dtype[np.int_]]]) +assert_type(np.ix_(AR_LIKE_i), tuple[np.ndarray[tuple[int], np.dtype[np.int_]]]) +assert_type(np.ix_(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.float32]]]) +assert_type( + np.ix_(AR_LIKE_b, AR_LIKE_b), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_LIKE_i, AR_LIKE_i), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_f4, AR_f4), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float32]], + np.ndarray[tuple[int, int], np.dtype[np.float32]], + ], +) +assert_type( + np.ix_(AR_LIKE_b, AR_LIKE_b, AR_LIKE_b), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_LIKE_i, AR_LIKE_i, AR_LIKE_i), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_f4, AR_f4, AR_f4), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float32]], + np.ndarray[tuple[int, int, int], np.dtype[np.float32]], + np.ndarray[tuple[int, int, int], np.dtype[np.float32]], + ], +) +assert_type( + np.ix_(AR_LIKE_b, AR_LIKE_b, AR_LIKE_b, AR_LIKE_b), + tuple[npt.NDArray[np.int_], ...], +) +assert_type( + np.ix_(AR_LIKE_i, AR_LIKE_i, AR_LIKE_i, AR_LIKE_b), + tuple[npt.NDArray[np.int_], ...], +) +assert_type( + np.ix_(AR_f4, AR_f4, AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], ...], +) +assert_type(np.ix_(AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.ix_(AR_LIKE_c), tuple[npt.NDArray[np.complex128], ...]) +assert_type(np.ix_(AR_LIKE_S), tuple[npt.NDArray[np.bytes_], ...]) +assert_type(np.ix_(AR_LIKE_U), tuple[npt.NDArray[np.str_], ...]) 
assert_type(np.fill_diagonal(AR_i8, 5), None) -assert_type(np.diag_indices(4), tuple[npt.NDArray[np.int_], ...]) -assert_type(np.diag_indices(2, 3), tuple[npt.NDArray[np.int_], ...]) - -assert_type(np.diag_indices_from(AR_i8), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.diag_indices(4), tuple[_Int1D, _Int1D]) +assert_type(np.diag_indices(4, 0), tuple[()]) +assert_type(np.diag_indices(4, 1), tuple[_Int1D]) +assert_type(np.diag_indices(4, 2), tuple[_Int1D, _Int1D]) +assert_type(np.diag_indices(4, 3), tuple[_Int1D, _Int1D, _Int1D]) +assert_type(np.diag_indices(4, 4), tuple[_Int1D, ...]) + +assert_type(np.diag_indices_from(AR_i8), tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]) +assert_type(np.diag_indices_from(AR_i8_2d), tuple[_Int1D, _Int1D]) +assert_type(np.diag_indices_from(AR_i8_3d), tuple[_Int1D, _Int1D, _Int1D]) +assert_type(np.diag_indices_from(AR_i8_4d), tuple[_Int1D, _Int1D, _Int1D, _Int1D]) +assert_type(np.diag_indices_from(AR_i8_5d), tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 345635d06327..bab43106d1c5 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -26,7 +26,7 @@ AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] -CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] @@ -130,8 +130,8 @@ assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) # copy assert_type(np.copy(AR_LIKE_f8), np.ndarray) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) 
-assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) # type: ignore[deprecated] +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) # type: ignore[deprecated] # pyright correctly infers `NDArray[str_]` here assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] @@ -270,7 +270,8 @@ assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128] # median assert_type(np.median(AR_f8, keepdims=False), np.float64) assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) -assert_type(np.median(AR_m), np.timedelta64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.median(AR_m), np.timedelta64) # type: ignore[assert-type] assert_type(np.median(AR_O), Any) assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) @@ -332,7 +333,8 @@ assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) -assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) # type: ignore[assert-type] assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 60056516def0..a1155e2bb5ed 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt @@ -10,127 +10,353 @@ from numpy.linalg._linalg import ( SVDResult, ) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] + +bool_list_1d: list[bool] +bool_list_2d: list[list[bool]] +int_list_1d: list[int] +int_list_2d: list[list[int]] +float_list_1d: list[float] float_list_2d: list[list[float]] +float_list_3d: list[list[list[float]]] +float_list_4d: list[list[list[list[float]]]] +complex_list_1d: list[complex] +complex_list_2d: list[list[complex]] +complex_list_3d: list[list[list[complex]]] +bytes_list_2d: list[list[bytes]] +str_list_2d: list[list[str]] + +AR_any: np.ndarray +AR_f_: npt.NDArray[np.floating] +AR_c_: npt.NDArray[np.complexfloating] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] AR_m: npt.NDArray[np.timedelta64] -AR_S: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] +AR_b_1d: np.ndarray[tuple[int], 
np.dtype[np.bool]] +AR_b_2d: np.ndarray[tuple[int, int], np.dtype[np.bool]] + +AR_i8_1d: np.ndarray[tuple[int], np.dtype[np.int64]] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] + +SC_f8: np.float64 +AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] +AR_f8_1d: _Array1D[np.float64] +AR_f8_2d: _Array2D[np.float64] +AR_f8_3d: _Array3D[np.float64] +AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] + +AR_f2_2d: _Array2D[np.float16] +AR_f4_1d: _Array1D[np.float32] +AR_f4_2d: _Array2D[np.float32] +AR_f4_3d: _Array3D[np.float32] +AR_f10_2d: _Array2D[np.longdouble] +AR_f10_3d: _Array3D[np.longdouble] + +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] +AR_c16_2d: np.ndarray[tuple[int, int], np.dtype[np.complex128]] + +### + assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensorsolve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.solve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.solve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.tensorinv(AR_i8), 
npt.NDArray[np.float64]) -assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[np.complex128]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[np.object_]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) 
-assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.qr(AR_i8), QRResult) -assert_type(np.linalg.qr(AR_f8), QRResult) -assert_type(np.linalg.qr(AR_c16), QRResult) +assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_f4), QRResult[np.float32]) +assert_type(np.linalg.qr(AR_f4, "r"), npt.NDArray[np.float32]) +assert_type(np.linalg.qr(AR_f4, "raw"), tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.qr(AR_f8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_f8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_f8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_c8), QRResult[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "r"), npt.NDArray[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "raw"), tuple[npt.NDArray[np.complex64], npt.NDArray[np.complex64]]) +assert_type(np.linalg.qr(AR_c16), QRResult[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "r"), npt.NDArray[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.NDArray[np.complex128]]) +# Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` +assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.qr(AR_any, "r"), npt.NDArray[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` +assert_type(np.linalg.qr(AR_any, "raw"), tuple[npt.NDArray[Any], 
npt.NDArray[Any]]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) -assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.eig(AR_i8), EigResult) -assert_type(np.linalg.eig(AR_f8), EigResult) -assert_type(np.linalg.eig(AR_c16), EigResult) +assert_type(np.linalg.eig(AR_i8), EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_f4), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_f8), EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) +# Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` +assert_type(np.linalg.eig(AR_f_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_c_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), EigResult[Any]) # type: ignore[assert-type] -assert_type(np.linalg.eigh(AR_i8), EighResult) -assert_type(np.linalg.eigh(AR_f8), EighResult) -assert_type(np.linalg.eigh(AR_c16), EighResult) +assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) +assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) 
+assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) +# Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` +assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] -assert_type(np.linalg.svd(AR_i8), SVDResult) -assert_type(np.linalg.svd(AR_f8), SVDResult) -assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8), SVDResult[np.float64, np.float64]) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) -assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) -assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_f4), SVDResult[np.float32, np.float32]) +assert_type(np.linalg.svd(AR_f4, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_f8), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_c8), SVDResult[np.float32, np.complex64]) +assert_type(np.linalg.svd(AR_c8, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_c16), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(int_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(int_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(float_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) +# Mypy bug: `Expression is of type "SVDResult[Any, Any]", 
not "SVDResult[Any, Any]"` +assert_type(np.linalg.svd(AR_any), SVDResult[Any, Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) -assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) - -assert_type(np.linalg.cond(AR_i8), Any) -assert_type(np.linalg.cond(AR_f8), Any) -assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) +assert_type(np.linalg.matrix_rank(SC_f8), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(float_list_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_2d), np.int_) +assert_type(np.linalg.matrix_rank(float_list_2d), np.int_) +assert_type(np.linalg.matrix_rank(AR_f8_3d), _Array1D[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_3d), _Array1D[np.int_]) +assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) 
+assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) -assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.cond(AR_f4_2d), np.float32) +assert_type(np.linalg.cond(AR_f8_2d), np.float64) +assert_type(np.linalg.cond(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f4_2d), SlogdetResult[np.float32, np.float32]) +assert_type(np.linalg.slogdet(AR_f8_2d), SlogdetResult[np.float64, np.float64]) +assert_type(np.linalg.slogdet(AR_f4_3d), SlogdetResult[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.slogdet(AR_f8_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.slogdet(complex_list_2d), SlogdetResult[np.float64, np.complex128]) +assert_type(np.linalg.slogdet(complex_list_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.complex128]]) assert_type(np.linalg.det(AR_i8), Any) assert_type(np.linalg.det(AR_f8), Any) assert_type(np.linalg.det(AR_c16), Any) +assert_type(np.linalg.det(AR_f4_2d), np.float32) +assert_type(np.linalg.det(AR_f8_2d), np.float64) +assert_type(np.linalg.det(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.det(complex_list_2d), np.complex128) +assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) -assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) 
-assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) -assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type( + np.linalg.lstsq(AR_i8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_i8, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f4), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c8), + tuple[npt.NDArray[np.complex64], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c16), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c16, AR_c8), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_1d), + tuple[_Array1D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_1d), + tuple[_Array1D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_2d), + tuple[_Array2D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, 
AR_f4_2d), + tuple[_Array2D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) -assert_type(np.linalg.norm(AR_i8), np.floating) -assert_type(np.linalg.norm(AR_f8), np.floating) -assert_type(np.linalg.norm(AR_c16), np.floating) -assert_type(np.linalg.norm(AR_S), np.floating) -assert_type(np.linalg.norm(AR_f8, axis=0), Any) +assert_type(np.linalg.norm(AR_i8), np.float64) +assert_type(np.linalg.norm(AR_f8), np.float64) +assert_type(np.linalg.norm(AR_c16), np.float64) +# Mypy incorrectly infers `Any` for datetime64 and timedelta64, but pyright behaves correctly. +assert_type(np.linalg.norm(AR_M), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.norm(AR_m), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.norm(AR_U), np.float64) +assert_type(np.linalg.norm(AR_S), np.float64) +assert_type(np.linalg.norm(AR_f8, 0, 1), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.norm(AR_f2), np.float16) +assert_type(np.linalg.norm(AR_f2, 0, 1), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2, axis=1), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2, keepdims=True), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2_2d, keepdims=True), _Array2D[np.float16]) +assert_type(np.linalg.norm(AR_f4), np.float32) +assert_type(np.linalg.norm(AR_c8), np.float32) +assert_type(np.linalg.norm(AR_f4, 0, 1), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4, axis=1), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.norm(AR_f10), np.longdouble) +assert_type(np.linalg.norm(AR_c20), np.longdouble) +assert_type(np.linalg.norm(AR_f10, 0, 1), 
npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10, axis=1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10, keepdims=True), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) -assert_type(np.linalg.matrix_norm(AR_i8), np.floating) -assert_type(np.linalg.matrix_norm(AR_f8), np.floating) -assert_type(np.linalg.matrix_norm(AR_c16), np.floating) -assert_type(np.linalg.matrix_norm(AR_S), np.floating) +assert_type(np.linalg.matrix_norm(AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_c16), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_U), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_S), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_f8_2d), np.float64) +assert_type(np.linalg.matrix_norm(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.matrix_norm(AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matrix_norm(AR_c8), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matrix_norm(AR_f4_2d), np.float32) +assert_type(np.linalg.matrix_norm(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.matrix_norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.matrix_norm(AR_f10), npt.NDArray[np.longdouble] | Any) +assert_type(np.linalg.matrix_norm(AR_c20), npt.NDArray[np.longdouble] | Any) +assert_type(np.linalg.matrix_norm(AR_f10_2d), np.longdouble) +assert_type(np.linalg.matrix_norm(AR_f10_3d), npt.NDArray[np.longdouble]) +assert_type(np.linalg.matrix_norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) +assert_type(np.linalg.matrix_norm(complex_list_2d), np.float64) +assert_type(np.linalg.matrix_norm(complex_list_3d), npt.NDArray[np.float64]) 
+assert_type(np.linalg.matrix_norm(complex_list_2d, keepdims=True), npt.NDArray[np.float64]) -assert_type(np.linalg.vector_norm(AR_i8), np.floating) -assert_type(np.linalg.vector_norm(AR_f8), np.floating) -assert_type(np.linalg.vector_norm(AR_c16), np.floating) -assert_type(np.linalg.vector_norm(AR_S), np.floating) +assert_type(np.linalg.vector_norm(AR_i8), np.float64) +assert_type(np.linalg.vector_norm(AR_f8), np.float64) +assert_type(np.linalg.vector_norm(AR_c16), np.float64) +# Mypy incorrectly infers `Any` for datetime64 and timedelta64, but pyright behaves correctly. +assert_type(np.linalg.vector_norm(AR_M), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.vector_norm(AR_m), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.vector_norm(AR_U), np.float64) +assert_type(np.linalg.vector_norm(AR_S), np.float64) +assert_type(np.linalg.vector_norm(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.linalg.vector_norm(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.linalg.vector_norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.vector_norm(AR_f2), np.float16) +assert_type(np.linalg.vector_norm(AR_f2, axis=1), npt.NDArray[np.float16]) +assert_type(np.linalg.vector_norm(AR_f2, keepdims=True), npt.NDArray[np.float16]) +assert_type(np.linalg.vector_norm(AR_f2_2d, keepdims=True), _Array2D[np.float16]) +assert_type(np.linalg.vector_norm(AR_f4), np.float32) +assert_type(np.linalg.vector_norm(AR_c8), np.float32) +assert_type(np.linalg.vector_norm(AR_f4, axis=1), npt.NDArray[np.float32]) +assert_type(np.linalg.vector_norm(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.linalg.vector_norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.vector_norm(AR_f10), np.longdouble) +assert_type(np.linalg.vector_norm(AR_c20), np.longdouble) +assert_type(np.linalg.vector_norm(AR_f10, axis=1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.vector_norm(AR_f10, keepdims=True), 
npt.NDArray[np.longdouble]) +assert_type(np.linalg.vector_norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) @@ -139,16 +365,145 @@ assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) -assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) -assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) -assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) -assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) +assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), npt.NDArray[np.int64]) +assert_type(np.linalg.multi_dot([AR_f8, AR_f8]), npt.NDArray[np.float64]) +assert_type(np.linalg.multi_dot([AR_c16, AR_c16]), npt.NDArray[np.complex128]) +assert_type(np.linalg.multi_dot([AR_O, AR_O]), npt.NDArray[np.object_]) +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), npt.NDArray[np.float64 | Any]) # type: ignore[assert-type] +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), npt.NDArray[np.complex128 | Any]) # type: ignore[assert-type] +assert_type(np.linalg.multi_dot([AR_m, AR_m]), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] + +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] +assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.diagonal(AR_f4_2d), _Array1D[np.float32]) +assert_type(np.linalg.diagonal(AR_f8_2d), _Array1D[np.float64]) +assert_type(np.linalg.diagonal(bool_list_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.diagonal(int_list_2d), npt.NDArray[np.int_]) +assert_type(np.linalg.diagonal(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) +assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) + +assert_type(np.linalg.trace(AR_any), Any) +assert_type(np.linalg.trace(AR_f4), Any) +assert_type(np.linalg.trace(AR_f4_2d), np.float32) +assert_type(np.linalg.trace(AR_f8_2d), np.float64) +assert_type(np.linalg.trace(AR_f4_3d), _Array1D[np.float32]) +assert_type(np.linalg.trace(AR_f8_3d), _Array1D[np.float64]) +assert_type(np.linalg.trace(AR_f8_4d), np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[np.float64]]) +assert_type(np.linalg.trace(bool_list_2d), np.bool) +assert_type(np.linalg.trace(int_list_2d), np.int_) +assert_type(np.linalg.trace(float_list_2d), np.float64) +assert_type(np.linalg.trace(complex_list_2d), np.complex128) +assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) + +assert_type(np.linalg.outer(bool_list_1d, bool_list_1d), _Array2D[np.bool]) +assert_type(np.linalg.outer(int_list_1d, int_list_1d), _Array2D[np.int64]) +assert_type(np.linalg.outer(float_list_1d, float_list_1d), _Array2D[np.float64]) +assert_type(np.linalg.outer(complex_list_1d, complex_list_1d), _Array2D[np.complex128]) +assert_type(np.linalg.outer(AR_i8, AR_i8), _Array2D[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), _Array2D[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), _Array2D[np.complex128]) +assert_type(np.linalg.outer(AR_b, AR_b), 
_Array2D[np.bool]) +assert_type(np.linalg.outer(AR_O, AR_O), _Array2D[np.object_]) +assert_type(np.linalg.outer(AR_i8, AR_m), _Array2D[np.timedelta64]) -assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cross(int_list_1d, int_list_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.cross(float_list_1d, int_list_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(float_list_1d, complex_list_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.cross(AR_f8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(AR_f2, AR_f2), npt.NDArray[np.float16]) +assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(AR_c16, AR_i8), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_O, AR_f8), npt.NDArray[np.object_]) +assert_type(np.linalg.cross(AR_f8, AR_O), npt.NDArray[np.object_]) +assert_type(np.linalg.cross(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_b, AR_b), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_b), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4, AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_i8), npt.NDArray[np.float64] | Any) 
+assert_type(np.linalg.matmul(AR_i8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_i8), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_c16), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128] | Any) +# same as the block above, but for the 1d x 1d case +assert_type(np.linalg.matmul(AR_b_1d, AR_b_1d), np.bool) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b_1d), np.int64) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8_1d), np.int64) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8_1d), np.int64) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8_1d), np.float64) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8_1d), np.float64) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8_1d), np.float64) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8_1d), np.complex128) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16_1d), np.complex128) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16_1d), np.complex128) +# 1d x 2d +assert_type(np.linalg.matmul(AR_b_1d, AR_b_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4_2d), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16_2d), npt.NDArray[np.complex128]) +# 1d x ?d 
+assert_type(np.linalg.matmul(AR_b_1d, AR_b), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16), npt.NDArray[np.complex128] | Any) +# 2d x 1d +assert_type(np.linalg.matmul(AR_b_2d, AR_b_1d), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_b_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_2d, AR_i8_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_i8_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_2d, AR_f4_1d), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_i8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_f8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_f8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_i8_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_c16_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_c16_1d), npt.NDArray[np.complex128]) +# 2d x ?d +assert_type(np.linalg.matmul(AR_b_2d, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_b), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_2d, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_i8), 
npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_2d, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_i8), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_c16), npt.NDArray[np.complex128]) +# ?d x 1d +assert_type(np.linalg.matmul(AR_b, AR_b_1d), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_b_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b, AR_i8_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_i8_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4, AR_f4_1d), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_i8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_f8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_f8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_i8_1d), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_c16_1d), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_c16_1d), npt.NDArray[np.complex128] | Any) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 8eef32ddd593..c2d2c4c23f3d 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,22 +1,21 @@ -from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, NoReturn, assert_type import numpy as np -from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", 
bound=generic, covariant=True) +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _NoMaskType = np.bool[Literal[False]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] -_NoMaskType: TypeAlias = np.bool[Literal[False]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +### -class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... +class MaskedArraySubclass[ScalarT: np.generic](np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]]): ... -class IntoMaskedArraySubClass(Generic[_ScalarT_co]): - def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... +class IntoMaskedArraySubClass[ScalarT: np.generic]: + def __array__(self) -> MaskedArraySubclass[ScalarT]: ... -MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] +type MaskedArraySubclassC = MaskedArraySubclass[np.complex128] +type MaskedArraySubclassI = MaskedArraySubclass[np.intp] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -53,6 +52,7 @@ MAR_floating: MaskedArray[np.floating] MAR_number: MaskedArray[np.number] MAR_subclass: MaskedArraySubclassC +MAR_subclass_i: MaskedArraySubclassI MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] @@ -133,12 +133,12 @@ assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) -assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) -assert_type(MAR_b.argmin(axis=0), Any) -assert_type(MAR_f4.argmin(axis=0), Any) -assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmin(axis=0), MaskedArray[np.intp]) 
+assert_type(MAR_f4.argmin(axis=0), MaskedArray[np.intp]) +assert_type(MAR_b.argmin(keepdims=True), MaskedArray[np.intp]) +assert_type(MAR_f4.argmin(out=MAR_subclass_i), MaskedArraySubclassI) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass_i), MaskedArraySubclassI) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -152,11 +152,11 @@ assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubcl assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) -assert_type(MAR_b.argmax(axis=0), Any) -assert_type(MAR_f4.argmax(axis=0), Any) -assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_b.argmax(axis=0), MaskedArray[np.intp]) +assert_type(MAR_f4.argmax(axis=0), MaskedArray[np.intp]) +assert_type(MAR_b.argmax(keepdims=True), MaskedArray[np.intp]) +assert_type(MAR_f4.argmax(out=MAR_subclass_i), MaskedArraySubclassI) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass_i), MaskedArraySubclassI) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -219,7 +219,10 @@ assert_type(MAR_f4.partition(1), None) assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) +assert_type( + MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), + np.ma.MaskedArray[tuple[int], np.dtype[np.intp]], +) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) @@ -423,8 +426,7 @@ assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) 
-assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) -assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], _Array1D[np.intp]]) assert_type(MAR_f8.trace(), Any) assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) @@ -440,10 +442,10 @@ assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timed assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) -assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.view(), MaskedArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 3a32b3d394f0..b76760d547b9 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,9 +1,9 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_Shape2D: TypeAlias = tuple[int, int] +type _Shape2D = tuple[int, int] mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi index f3e20ed2d5e7..0babdeefb6f1 100644 --- a/numpy/typing/tests/data/reveal/memmap.pyi +++ b/numpy/typing/tests/data/reveal/memmap.pyi @@ -1,19 +1,21 @@ -from typing import Any, assert_type +from typing import Any, Literal, assert_type import numpy as np -memmap_obj: np.memmap[Any, np.dtype[np.str_]] +type _Memmap[ScalarT: 
np.generic] = np.memmap[tuple[Any, ...], np.dtype[ScalarT]] + +memmap_obj: _Memmap[np.str_] assert_type(np.memmap.__array_priority__, float) assert_type(memmap_obj.__array_priority__, float) assert_type(memmap_obj.filename, str | None) assert_type(memmap_obj.offset, int) -assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.mode, Literal["r", "r+", "w+", "c"]) assert_type(memmap_obj.flush(), None) -assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) -assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) +assert_type(np.memmap("file.txt", offset=5), _Memmap[np.uint8]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), _Memmap[np.float64]) with open("file.txt", "rb") as f: - assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype]) + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap) assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index ef07dc0c8c8a..131e9259b6b5 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -27,13 +27,16 @@ f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] +# NOTE: the __divmod__ calls are workarounds for https://github.com/microsoft/pyright/issues/9663 + # Time structures assert_type(m % m, np.timedelta64) assert_type(m % m_nat, np.timedelta64[None]) assert_type(m % m_int0, np.timedelta64[None]) assert_type(m % m_int, np.timedelta64[int | None]) -assert_type(m_nat % m, np.timedelta64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. 
+assert_type(m_nat % m, np.timedelta64[None]) # type: ignore[assert-type] assert_type(m_int % m_nat, np.timedelta64[None]) assert_type(m_int % m_int0, np.timedelta64[None]) assert_type(m_int % m_int, np.timedelta64[int | None]) @@ -46,20 +49,22 @@ assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) assert_type(AR_m % m, npt.NDArray[np.timedelta64]) assert_type(m % AR_m, npt.NDArray[np.timedelta64]) -assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) -assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 +# +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(m.__divmod__(m), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] +assert_type(m.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(m_nat.__divmod__(m), tuple[np.int64, np.timedelta64[None]]) # type: ignore[assert-type] +assert_type(m_int.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_int.__divmod__(m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) @@ -77,7 +82,6 @@ assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) @@ -118,7 +122,6 @@ assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) @@ -142,7 +145,6 @@ assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) assert_type(divmod(i4, i8), 
tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) @@ -172,7 +174,6 @@ assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 424f60df27e7..2c8bab482ff1 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,23 +1,31 @@ import datetime as dt -from typing import Any, Literal, TypeVar, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) - -class SubClass(npt.NDArray[_ScalarT_co]): ... +class SubClass[ScalarT: np.generic](np.ndarray[tuple[Any, ...], np.dtype[ScalarT]]): ... 
subclass: SubClass[np.float64] +AR_f4_nd: npt.NDArray[np.float32] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] AR_i8: npt.NDArray[np.int64] AR_u1: npt.NDArray[np.uint8] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] +AR_O_nd: npt.NDArray[np.object_] +AR_O_1d: np.ndarray[tuple[int], np.dtype[np.object_]] +AR_O_2d: np.ndarray[tuple[int, int], np.dtype[np.object_]] -AR_LIKE_f: list[float] +AR_LIKE_b: list[bool] AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] m: np.timedelta64 M: np.datetime64 @@ -61,9 +69,137 @@ assert_type(b_i8_f8_f8.numiter, int) assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) assert_type(b_i8_f8_f8.size, int) -assert_type(np.inner(AR_f8, AR_i8), Any) - -assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) +# + +assert_type(np.inner(AR_LIKE_b, AR_LIKE_b), np.bool) +assert_type(np.inner(AR_LIKE_b, AR_LIKE_i), np.int_) +assert_type(np.inner(AR_LIKE_b, AR_LIKE_f), np.float64) +assert_type(np.inner(AR_LIKE_b, AR_LIKE_c), np.complex128) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_b), np.int_) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_i), np.int_) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_f), np.float64) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_c), np.complex128) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_b), np.float64) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_i), np.float64) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_f), np.float64) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_c), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_b), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_i), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_f), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_c), np.complex128) + +assert_type(np.inner(AR_f4_1d, AR_f4_1d), np.float32) 
+assert_type(np.inner(AR_f4_1d, AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.inner(AR_f4_1d, AR_f4_nd), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_2d, AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.inner(AR_f4_2d, AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.inner(AR_f4_2d, AR_f4_nd), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_nd, AR_f4_1d), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_nd, AR_f4_2d), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_nd, AR_f4_nd), npt.NDArray[np.float32] | Any) + +assert_type(np.inner(AR_O_1d, AR_O_1d), Any) +assert_type(np.inner(AR_O_1d, AR_O_2d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.inner(AR_O_1d, AR_O_nd), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_2d, AR_O_1d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.inner(AR_O_2d, AR_O_2d), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.inner(AR_O_2d, AR_O_nd), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_nd, AR_O_1d), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_nd, AR_O_2d), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_nd, AR_O_nd), npt.NDArray[np.object_] | Any) + +assert_type(np.inner(AR_u1, AR_u1), npt.NDArray[np.uint8] | Any) +assert_type(np.inner(AR_i8, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.inner(AR_f8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.inner(AR_c16, AR_c16), npt.NDArray[np.complex128] | Any) + +# + +assert_type(np.dot(AR_LIKE_b, AR_LIKE_b), np.bool) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_b), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_f), np.float64) 
+assert_type(np.dot(AR_LIKE_i, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_b), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_i), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_b), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_i), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_f), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_c), np.complex128) + +assert_type(np.dot(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.dot(AR_f4_1d, AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_1d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_2d, AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_1d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_2d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_nd), Any) + +assert_type(np.dot(AR_O_1d, AR_O_1d), Any) +assert_type(np.dot(AR_O_1d, AR_O_2d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_1d, AR_O_nd), Any) +assert_type(np.dot(AR_O_2d, AR_O_1d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_2d), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_nd), Any) +assert_type(np.dot(AR_O_nd, AR_O_1d), Any) +assert_type(np.dot(AR_O_nd, AR_O_2d), Any) +assert_type(np.dot(AR_O_nd, AR_O_nd), Any) + +# + +assert_type(np.dot(AR_LIKE_b, AR_LIKE_b), np.bool) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_b), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_f), np.float64) 
+assert_type(np.dot(AR_LIKE_i, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_b), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_i), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_b), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_i), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_f), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_c), np.complex128) + +assert_type(np.dot(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.dot(AR_f4_1d, AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_1d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_2d, AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_1d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_2d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_nd), Any) + +assert_type(np.dot(AR_O_1d, AR_O_1d), Any) +assert_type(np.dot(AR_O_1d, AR_O_2d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_1d, AR_O_nd), Any) +assert_type(np.dot(AR_O_2d, AR_O_1d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_2d), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_nd), Any) +assert_type(np.dot(AR_O_nd, AR_O_1d), Any) +assert_type(np.dot(AR_O_nd, AR_O_2d), Any) +assert_type(np.dot(AR_O_nd, AR_O_nd), Any) + +# + +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + +assert_type(np.where([True, True, False]), tuple[_Int1D,]) +assert_type(np.where(AR_f4_1d), tuple[_Int1D]) +assert_type(np.where(AR_f4_2d), tuple[_Int1D, _Int1D]) +assert_type(np.where(AR_f4_3d), tuple[_Int1D, _Int1D, _Int1D]) +assert_type(np.where(AR_f4_nd), tuple[_Int1D, ...]) assert_type(np.where([True, True, False], 1, 0), 
npt.NDArray[Any]) assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) @@ -79,16 +215,11 @@ assert_type(np.result_type(int, [1]), np.dtype) assert_type(np.result_type(AR_f8, AR_u1), np.dtype) assert_type(np.result_type(AR_f8, np.complex128), np.dtype) -assert_type(np.dot(AR_LIKE_f, AR_i8), Any) -assert_type(np.dot(AR_u1, 1), Any) -assert_type(np.dot(1.5j, 1), Any) -assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) - assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) assert_type(np.vdot(AR_u1, 1), np.signedinteger) assert_type(np.vdot(1.5j, 1), np.complexfloating) -assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) +assert_type(np.bincount(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) @@ -170,9 +301,10 @@ assert_type(np.busday_count("2011-01", "2011-02"), np.int_) assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) -assert_type(np.busday_offset(M, m), np.datetime64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.busday_offset(M, m), np.datetime64) # type: ignore[assert-type] +assert_type(np.busday_offset(M, 5), np.datetime64) # type: ignore[assert-type] assert_type(np.busday_offset(date_scalar, m), np.datetime64) -assert_type(np.busday_offset(M, 5), np.datetime64) assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) @@ -182,7 +314,8 @@ assert_type(np.is_busday("2012"), np.bool) assert_type(np.is_busday(date_scalar), np.bool) assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) -assert_type(np.datetime_as_string(M), np.str_) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.datetime_as_string(M), np.str_) # type: ignore[assert-type] assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 66470b95bf15..c6e931eaca84 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,13 +1,10 @@ -from typing import TypeVar, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt from numpy._typing import _32Bit, _64Bit -T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... +def add[T1: npt.NBitBase, T2: npt.NBitBase](a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... # type: ignore[deprecated] i8: np.int64 i4: np.int32 diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index feaccf28f578..e8ccc573d642 100644 --- a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,39 +1,36 @@ -from typing import Any, Protocol, TypeAlias, TypeVar, assert_type +from typing import Any, Protocol, assert_type import numpy as np from numpy._typing import _64Bit -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) +class CanAbs[T](Protocol): + def __abs__(self, /) -> T: ... -class CanAbs(Protocol[_T_co]): - def __abs__(self, /) -> _T_co: ... +class CanInvert[T](Protocol): + def __invert__(self, /) -> T: ... -class CanInvert(Protocol[_T_co]): - def __invert__(self, /) -> _T_co: ... +class CanNeg[T](Protocol): + def __neg__(self, /) -> T: ... 
-class CanNeg(Protocol[_T_co]): - def __neg__(self, /) -> _T_co: ... +class CanPos[T](Protocol): + def __pos__(self, /) -> T: ... -class CanPos(Protocol[_T_co]): - def __pos__(self, /) -> _T_co: ... +def do_abs[T](x: CanAbs[T]) -> T: ... +def do_invert[T](x: CanInvert[T]) -> T: ... +def do_neg[T](x: CanNeg[T]) -> T: ... +def do_pos[T](x: CanPos[T]) -> T: ... -def do_abs(x: CanAbs[_T]) -> _T: ... -def do_invert(x: CanInvert[_T]) -> _T: ... -def do_neg(x: CanNeg[_T]) -> _T: ... -def do_pos(x: CanPos[_T]) -> _T: ... - -_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] -_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] -_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] -_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] -_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] -_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] -_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] -_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] -_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] +type _Bool_1d = np.ndarray[tuple[int], np.dtype[np.bool]] +type _UInt8_1d = np.ndarray[tuple[int], np.dtype[np.uint8]] +type _Int16_1d = np.ndarray[tuple[int], np.dtype[np.int16]] +type _LongLong_1d = np.ndarray[tuple[int], np.dtype[np.longlong]] +type _Float32_1d = np.ndarray[tuple[int], np.dtype[np.float32]] +type _Float64_1d = np.ndarray[tuple[int], np.dtype[np.float64]] +type _LongDouble_1d = np.ndarray[tuple[int], np.dtype[np.longdouble]] +type _Complex64_1d = np.ndarray[tuple[int], np.dtype[np.complex64]] +type _Complex128_1d = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _CLongDouble_1d = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +type _Void_1d = 
np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 2972a58c328f..ba718421af88 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -7,6 +7,7 @@ function-based counterpart in `../from_numeric.py`. """ import ctypes as ct +import datetime as dt import operator from collections.abc import Iterator from types import ModuleType @@ -16,11 +17,17 @@ from typing_extensions import CapsuleType import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.object_]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.object_]]): ... f8: np.float64 i8: np.int64 +b1: np.bool +m8_ns: np.timedelta64[int] +m8_ms: np.timedelta64[dt.timedelta] +m8_na: np.timedelta64[None] + B: SubClass + AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] AR_u1: npt.NDArray[np.uint8] @@ -33,6 +40,8 @@ AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_any: np.ndarray + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -49,25 +58,41 @@ assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) assert_type(f8.all(), np.bool) assert_type(AR_f8.all(), np.bool) -assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) -assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(axis=0), npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), npt.NDArray[np.bool]) +assert_type(AR_f8_1d.all(keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(AR_f8_2d.all(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(AR_f8_3d.all(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(AR_f8.all(out=B), 
SubClass) assert_type(f8.any(), np.bool) assert_type(AR_f8.any(), np.bool) -assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) -assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(axis=0), npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), npt.NDArray[np.bool]) +assert_type(AR_f8_1d.any(keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(AR_f8_2d.any(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(AR_f8_3d.any(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(AR_f8.any(out=B), SubClass) +# same as below assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) -assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) - +assert_type(AR_f8.argmax(axis=0), npt.NDArray[np.intp]) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.int64]) +assert_type(AR_f8.argmax(keepdims=True), npt.NDArray[np.intp]) +assert_type(AR_f8_1d.argmax(keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(AR_f8_2d.argmax(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(AR_f8_3d.argmax(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) + +# same as above assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) -assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmin(axis=0), npt.NDArray[np.intp]) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.int64]) +assert_type(AR_f8.argmin(keepdims=True), npt.NDArray[np.intp]) +assert_type(AR_f8_1d.argmin(keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(AR_f8_2d.argmin(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(AR_f8_3d.argmin(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) assert_type(f8.argsort(), npt.NDArray[np.intp]) 
assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) @@ -94,37 +119,182 @@ assert_type(f8.conjugate(), np.float64) assert_type(AR_f8.conjugate(), npt.NDArray[np.float64]) assert_type(B.conjugate(), SubClass) -assert_type(f8.cumprod(), npt.NDArray[Any]) -assert_type(AR_f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_i8.cumprod(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(AR_i8.cumprod(axis=0), npt.NDArray[np.int64]) +assert_type(AR_f8.cumprod(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.cumprod(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.cumprod(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8.cumprod(dtype=np.float32, axis=0), npt.NDArray[np.float32]) assert_type(AR_f8.cumprod(out=B), SubClass) - -assert_type(f8.cumsum(), npt.NDArray[Any]) -assert_type(AR_f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8_2d.cumprod(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumprod(axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumprod(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.cumprod(dtype=np.float32, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.cumprod(), np.ndarray[tuple[int]]) +assert_type(AR_any.cumprod(axis=0), np.ndarray) + +assert_type(AR_i8.cumsum(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(AR_i8.cumsum(axis=0), npt.NDArray[np.int64]) +assert_type(AR_f8.cumsum(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.cumsum(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.cumsum(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8.cumsum(dtype=np.float32, axis=0), npt.NDArray[np.float32]) assert_type(AR_f8.cumsum(out=B), SubClass) - -assert_type(f8.max(), Any) -assert_type(AR_f8.max(), Any) -assert_type(AR_f8.max(axis=0), Any) -assert_type(AR_f8.max(keepdims=True), Any) 
-assert_type(AR_f8.max(out=B), SubClass) - +assert_type(AR_f8_2d.cumsum(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumsum(axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumsum(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.cumsum(dtype=np.float32, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.cumsum(), np.ndarray[tuple[int]]) +assert_type(AR_any.cumsum(axis=0), np.ndarray) + +# same as below (but without `timedelta64`) +assert_type(b1.prod(), np.int_) +assert_type(i8.prod(), np.int64) +assert_type(f8.prod(), np.float64) +assert_type(AR_i8.prod(), np.int64) +assert_type(AR_i8.prod(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.prod(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.prod(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.prod(), np.float64) +assert_type(AR_f8.prod(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.prod(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.prod(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.prod(dtype=np.float32), np.float32) +assert_type(AR_f8.prod(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.prod(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.prod(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.prod(out=B), SubClass) +assert_type(AR_f8_2d.prod(), np.float64) +assert_type(AR_f8_2d.prod(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.prod(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.prod(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.prod(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.prod(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.prod(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], 
np.dtype[np.float32]]) +assert_type(AR_f8_2d.prod(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.prod(), Any) + +# same as above (but also accept `timedelta64`) +assert_type(b1.sum(), np.int_) +assert_type(i8.sum(), np.int64) +assert_type(f8.sum(), np.float64) +assert_type(m8_ns.sum(), np.timedelta64[int]) +assert_type(m8_ms.sum(), np.timedelta64[dt.timedelta]) +assert_type(m8_na.sum(), np.timedelta64[None]) +assert_type(AR_i8.sum(), np.int64) +assert_type(AR_i8.sum(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.sum(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.sum(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.sum(), np.float64) +assert_type(AR_f8.sum(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.sum(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.sum(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.sum(dtype=np.float32), np.float32) +assert_type(AR_f8.sum(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.sum(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.sum(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.sum(out=B), SubClass) +assert_type(AR_f8_2d.sum(), np.float64) +assert_type(AR_f8_2d.sum(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.sum(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.sum(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.sum(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.sum(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.sum(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.sum(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.sum(), Any) + +# same as below 
assert_type(f8.mean(), Any) -assert_type(AR_f8.mean(), Any) -assert_type(AR_f8.mean(axis=0), Any) -assert_type(AR_f8.mean(keepdims=True), Any) +assert_type(AR_f8.mean(), np.float64) +assert_type(AR_f8.mean(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.mean(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.mean(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.mean(dtype=np.float32), np.float32) +assert_type(AR_f8.mean(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.mean(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.mean(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) assert_type(AR_f8.mean(out=B), SubClass) +assert_type(AR_f8_2d.mean(), np.float64) +assert_type(AR_f8_2d.mean(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.mean(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.mean(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.mean(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.mean(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.mean(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.mean(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +# same as above +assert_type(f8.std(), Any) +assert_type(AR_f8.std(), np.float64) +assert_type(AR_f8.std(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.std(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.std(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.std(dtype=np.float32), np.float32) +assert_type(AR_f8.std(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.std(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.std(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) 
+assert_type(AR_f8.std(out=B), SubClass) +assert_type(AR_f8_2d.std(), np.float64) +assert_type(AR_f8_2d.std(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.std(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.std(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.std(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.std(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.std(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.std(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +# same as above +assert_type(f8.var(), Any) +assert_type(AR_f8.var(), np.float64) +assert_type(AR_f8.var(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.var(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.var(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.var(dtype=np.float32), np.float32) +assert_type(AR_f8.var(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.var(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.var(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.var(out=B), SubClass) +assert_type(AR_f8_2d.var(), np.float64) +assert_type(AR_f8_2d.var(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.var(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.var(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.var(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.var(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.var(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.var(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +# same as below 
+assert_type(f8.max(), Any) +assert_type(AR_i8.max(), np.int64) +assert_type(AR_i8.max(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.max(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.max(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.max(), np.float64) +assert_type(AR_f8.max(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.max(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.max(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.max(out=B), SubClass) +assert_type(AR_f8_2d.max(), np.float64) +assert_type(AR_f8_2d.max(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.max(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.max(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +# same as above assert_type(f8.min(), Any) -assert_type(AR_f8.min(), Any) -assert_type(AR_f8.min(axis=0), Any) -assert_type(AR_f8.min(keepdims=True), Any) +assert_type(AR_i8.min(), np.int64) +assert_type(AR_i8.min(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.min(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.min(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.min(), np.float64) +assert_type(AR_f8.min(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.min(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.min(axis=0, keepdims=True), npt.NDArray[np.float64]) assert_type(AR_f8.min(out=B), SubClass) - -assert_type(f8.prod(), Any) -assert_type(AR_f8.prod(), Any) -assert_type(AR_f8.prod(axis=0), Any) -assert_type(AR_f8.prod(keepdims=True), Any) -assert_type(AR_f8.prod(out=B), SubClass) +assert_type(AR_f8_2d.min(), np.float64) +assert_type(AR_f8_2d.min(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.min(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.min(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(f8.round(), np.float64) 
assert_type(AR_f8.round(), npt.NDArray[np.float64]) @@ -137,39 +307,30 @@ assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) -assert_type(f8.std(), Any) -assert_type(AR_f8.std(), Any) -assert_type(AR_f8.std(axis=0), Any) -assert_type(AR_f8.std(keepdims=True), Any) -assert_type(AR_f8.std(out=B), SubClass) - -assert_type(f8.sum(), Any) -assert_type(AR_f8.sum(), Any) -assert_type(AR_f8.sum(axis=0), Any) -assert_type(AR_f8.sum(keepdims=True), Any) -assert_type(AR_f8.sum(out=B), SubClass) - assert_type(f8.take(0), np.float64) assert_type(AR_f8.take(0), np.float64) assert_type(AR_f8.take([0]), npt.NDArray[np.float64]) assert_type(AR_f8.take(0, out=B), SubClass) assert_type(AR_f8.take([0], out=B), SubClass) -assert_type(f8.var(), Any) -assert_type(AR_f8.var(), Any) -assert_type(AR_f8.var(axis=0), Any) -assert_type(AR_f8.var(keepdims=True), Any) -assert_type(AR_f8.var(out=B), SubClass) - +assert_type(AR_f8.argpartition(0), npt.NDArray[np.intp]) assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) +assert_type(AR_f8.argpartition(0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.diagonal(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8_3d.diagonal(), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + +assert_type(AR_f8.nonzero(), tuple[_Int1D, ...]) +assert_type(AR_f8_1d.nonzero(), tuple[_Int1D]) +assert_type(AR_f8_2d.nonzero(), tuple[_Int1D, _Int1D]) +assert_type(AR_f8_3d.nonzero(), tuple[_Int1D, _Int1D, _Int1D]) assert_type(AR_f8.searchsorted(1), np.intp) 
assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 0ce599a40310..6bbe057ff5b7 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt -_ArrayND: TypeAlias = npt.NDArray[np.int64] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] +type _ArrayND = npt.NDArray[np.int64] +type _Array2D = np.ndarray[tuple[int, int], np.dtype[np.int8]] +type _Array3D = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] _nd: _ArrayND _2d: _Array2D diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index e3eaa45a5fa1..d230f5a3d640 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -18,6 +18,8 @@ npz_file: np.lib.npyio.NpzFile AR_i8: npt.NDArray[np.int64] AR_LIKE_f8: list[float] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + class BytesWriter: def write(self, data: bytes) -> None: ... 
@@ -68,11 +70,11 @@ assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64]) assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64]) assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64]) -assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64]) -assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any]) -assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_]) -assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64]) -assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(bytes_file, "test", np.float64), _Array1D[np.float64]) +assert_type(np.fromregex(str_file, b"test", dtype=float), _Array1D[Any]) +assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), _Array1D[np.str_]) +assert_type(np.fromregex(pathlib_path, "test", np.float64), _Array1D[np.float64]) +assert_type(np.fromregex(bytes_reader, "test", np.float64), _Array1D[np.float64]) assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any]) assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 24f97d2d0784..7b3abc2d6761 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -10,7 +10,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.int64]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.int64]]): ... 
i8: np.int64 @@ -46,6 +46,8 @@ assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +# NOTE: Mypy incorrectly infers `np.ndarray[Any, Any]` for timedelta64 + # correlate assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -55,7 +57,7 @@ assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) -assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -72,7 +74,7 @@ assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float6 assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) -assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.convolve(AR_i8, AR_m), 
np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -89,7 +91,7 @@ assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.floa assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) -assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 4c4899ad6308..faba91273c91 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,30 +1,29 @@ from collections.abc import Sequence from decimal import Decimal -from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type +from typing import Any, Literal as L, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] -_Ar_f: TypeAlias = npt.NDArray[np.floating] -_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] -_Ar_O: TypeAlias = npt.NDArray[np.object_] +type _Ar_x = npt.NDArray[np.inexact | np.object_] +type _Ar_f = npt.NDArray[np.floating] +type _Ar_c = npt.NDArray[np.complexfloating] +type _Ar_O = 
npt.NDArray[np.object_] -_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] -_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _Ar_x_n = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +type _Ar_f_n = np.ndarray[tuple[int], np.dtype[np.floating]] +type _Ar_c_n = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _Ar_O_n = np.ndarray[tuple[int], np.dtype[np.object_]] -_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] -_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] -_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] -_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _Ar_x_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +type _Ar_f_2 = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +type _Ar_c_2 = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +type _Ar_O_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _Ar_1d[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_BasisName: TypeAlias = L["X"] +type _BasisName = L["X"] SC_i: np.int_ SC_i_co: int | np.int_ diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 07d6c9d1af65..9c5aff1117dc 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,20 +1,20 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Literal as L, TypeAlias, assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.polynomial.polyutils as pu import 
numpy.typing as npt from numpy.polynomial._polytypes import _Tuple2 -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] -_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] -_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] -_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _ArrFloat1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +type _ArrComplex1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +type _ArrObject1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] num_int: int num_float: float diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 3188ad9a1239..b87ba4fb2677 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,15 +1,15 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrFloat1D64 = np.ndarray[tuple[int], np.dtype[np.float64]] +type _ArrComplex1D = 
np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrComplex1D128 = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] AR_b: npt.NDArray[np.bool] AR_u4: npt.NDArray[np.uint32] diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index e188eb02893f..72f8c62f79e0 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -349,11 +349,11 @@ assert_type(def_gen.gumbel(0.5, 0.5), float) assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -363,11 +363,11 @@ assert_type(def_gen.laplace(0.5, 0.5), float) assert_type(def_gen.laplace(0.5, 0.5, size=None), float) assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), 
npt.NDArray[np.float64]) assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -377,11 +377,11 @@ assert_type(def_gen.logistic(0.5, 0.5), float) assert_type(def_gen.logistic(0.5, 0.5, size=None), float) assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -419,11 +419,11 @@ assert_type(def_gen.normal(0.5, 0.5), float) assert_type(def_gen.normal(0.5, 0.5, size=None), float) assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) 
+assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -488,14 +488,14 @@ assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), np assert_type(def_gen.hypergeometric(20, 20, 10), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, 
I_arr_like_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) @@ -503,8 +503,8 @@ I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) assert_type(def_gen.integers(0, 100), np.int64) assert_type(def_gen.integers(100), np.int64) -assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) -assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64] | Any) I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) I_bool_low_like: list[int] = [0] @@ -515,107 +515,59 @@ assert_type(def_gen.integers(2, dtype=bool), bool) assert_type(def_gen.integers(0, 2, dtype=bool), bool) assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, 
endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) assert_type(def_gen.integers(2, dtype=np.bool), np.bool) assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) 
+assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) - -assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) 
-assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) - assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) 
+assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) - -assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, 
dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) - assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) 
+assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) I_u4_low_like: list[int] = [0] @@ -626,266 +578,122 @@ assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) - -assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), 
npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) - -assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) 
+assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) 
assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -assert_type(def_gen.integers(18446744073709551616, 
dtype="u8"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) - -assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, 
I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) - assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) 
I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -assert_type(def_gen.integers(128, dtype="i1"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) - -assert_type(def_gen.integers(128, dtype="int8"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) 
-assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) - assert_type(def_gen.integers(128, dtype=np.int8), np.int8) assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) 
I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) - -assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, 
dtype="int16", endpoint=True), npt.NDArray[np.int16]) - assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], 
dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) - -assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, 
dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) - assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) 
I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) - -assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) 
-assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) - assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) 
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) assert_type(def_gen.bit_generator, np.random.BitGenerator) @@ -897,11 +705,12 @@ assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) +str_list: list[str] +assert_type(def_gen.choice(str_list), Any) +assert_type(def_gen.choice(str_list, 3), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) @@ -929,13 +738,13 @@ assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) assert_type(def_gen.permutation([1, 2, 3, 4]), 
npt.NDArray[Any]) assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[np.float64]) assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[np.float64]) assert_type(def_gen.shuffle(np.arange(10)), None) assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) @@ -968,510 +777,511 @@ assert_type(random_st.standard_exponential(size=1), npt.NDArray[np.float64]) assert_type(random_st.zipf(1.5), int) assert_type(random_st.zipf(1.5, size=None), int) assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long] | Any) assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long] | Any) assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.long]) assert_type(random_st.weibull(0.5), float) assert_type(random_st.weibull(0.5, size=None), float) assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64]) 
-assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_t(0.5), float) assert_type(random_st.standard_t(0.5, size=None), float) assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.poisson(0.5), int) assert_type(random_st.poisson(0.5, size=None), int) assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.power(0.5), float) assert_type(random_st.power(0.5, size=None), float) assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64] | Any) 
assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.pareto(0.5), float) assert_type(random_st.pareto(0.5, size=None), float) assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.chisquare(0.5), float) assert_type(random_st.chisquare(0.5, size=None), float) assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.exponential(0.5), float) assert_type(random_st.exponential(0.5, size=None), float) assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.exponential(D_arr_like_0p5), 
npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.geometric(0.5), int) assert_type(random_st.geometric(0.5, size=None), int) assert_type(random_st.geometric(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.logseries(0.5), int) assert_type(random_st.logseries(0.5, size=None), int) assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.rayleigh(0.5), float) assert_type(random_st.rayleigh(0.5, size=None), float) assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.rayleigh(D_arr_like_0p5, 
size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_gamma(0.5), float) assert_type(random_st.standard_gamma(0.5, size=None), float) assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(0.5, 0.5), float) assert_type(random_st.vonmises(0.5, 0.5, size=None), float) assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(D_arr_0p5, 
D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(0.5, 0.5), float) assert_type(random_st.wald(0.5, 0.5, size=None), float) assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(0.5, 0.5), float) assert_type(random_st.uniform(0.5, 0.5, size=None), float) assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_0p5, 0.5), 
npt.NDArray[np.float64]) -assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(0.5, 0.5), float) assert_type(random_st.beta(0.5, 0.5, size=None), float) assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.beta(0.5, 
D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(0.5, 0.5), float) assert_type(random_st.f(0.5, 0.5, size=None), float) assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), 
npt.NDArray[np.float64]) assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(0.5, 0.5), float) assert_type(random_st.gamma(0.5, 0.5, size=None), float) assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(0.5, 0.5), float) assert_type(random_st.gumbel(0.5, 0.5, size=None), float) assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) 
+assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(0.5, 0.5), float) assert_type(random_st.laplace(0.5, 0.5, size=None), float) assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) 
-assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(0.5, 0.5), float) assert_type(random_st.logistic(0.5, 0.5, size=None), float) assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.logistic(D_arr_0p5, 
D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(0.5, 0.5), float) assert_type(random_st.lognormal(0.5, 0.5, size=None), float) assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(0.5, 0.5), float) assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float) assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) 
-assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(0.5, 0.5), float) assert_type(random_st.normal(0.5, 0.5, size=None), float) assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) 
assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(0.1, 0.5, 0.9), float) assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), 
npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), 
npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.binomial(10, 0.5), int) assert_type(random_st.binomial(10, 0.5, size=None), int) assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.negative_binomial(10, 0.5), int) assert_type(random_st.negative_binomial(10, 0.5, size=None), int) assert_type(random_st.negative_binomial(10, 0.5, size=1), npt.NDArray[np.long]) 
-assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long])
-assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long])
+assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long] | Any)
+assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long] | Any)
 assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long])
 assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long])
-assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long])
-assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long])
-assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long])
-assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long])
+assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long] | Any)
+assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long] | Any)
+assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long] | Any)
+assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any)
 assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long])
 assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long])
 assert_type(random_st.hypergeometric(20, 20, 10), int)
 assert_type(random_st.hypergeometric(20, 20, 10, size=None), int)
 assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.long])
-assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long])
-assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long])
+assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long] | Any)
+assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long] | Any) assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long] | Any) assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.long]) assert_type(random_st.randint(0, 100), int) assert_type(random_st.randint(100), int) -assert_type(random_st.randint([100]), npt.NDArray[np.long]) -assert_type(random_st.randint(0, [100]), npt.NDArray[np.long]) +assert_type(random_st.randint([100]), npt.NDArray[np.long] | Any) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.long] | Any) assert_type(random_st.randint(2, dtype=bool), bool) assert_type(random_st.randint(0, 2, dtype=bool), bool) -assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), 
npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) assert_type(random_st.randint(2, dtype=np.bool), np.bool) assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) -assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) assert_type(random_st.randint(256, dtype="u1"), np.uint8) assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(256, dtype="uint8"), np.uint8) assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) 
+assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(65536, dtype="u2"), np.uint16) assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) 
-assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), 
npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) -assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) 
assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] 
| Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(128, dtype="i1"), np.int8) assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(128, dtype="int8"), np.int8) assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(128, dtype=np.int8), np.int8) assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, 
dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(32768, dtype="i2"), np.int16) assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(32768, dtype="int16"), np.int16) assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(32768, dtype=np.int16), np.int16) assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, 
dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), 
npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) -assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) 
assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) assert_type(random_st._bit_generator, np.random.BitGenerator) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index aacf217e4207..da66ab003078 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,10 +1,10 @@ import io -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] 
+type _RecArray = np.recarray[tuple[Any, ...], np.dtype[np.record]] AR_i8: npt.NDArray[np.int64] REC_AR_V: _RecArray diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 67444e33dfc3..c56c8e88092c 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,9 +1,7 @@ -from typing import Any, Literal, TypeAlias, assert_type +from typing import Any, Literal, assert_type import numpy as np -_1: TypeAlias = Literal[1] - b: np.bool u8: np.uint64 i8: np.int64 @@ -44,6 +42,94 @@ assert_type(c16.imag, np.float64) assert_type(np.str_("foo"), np.str_) +# Indexing +assert_type(b[()], np.bool) +assert_type(i8[()], np.int64) +assert_type(u8[()], np.uint64) +assert_type(f8[()], np.float64) +assert_type(c8[()], np.complex64) +assert_type(c16[()], np.complex128) +assert_type(U[()], np.str_) +assert_type(S[()], np.bytes_) +assert_type(V[()], np.void) + +assert_type(b[...], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(b[(...,)], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(i8[...], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(i8[(...,)], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(u8[...], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(u8[(...,)], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(f8[...], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(f8[(...,)], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(c8[...], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c8[(...,)], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c16[...], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(c16[(...,)], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(U[...], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(U[(...,)], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(S[...], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(S[(...,)], 
np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(V[...], np.ndarray[tuple[()], np.dtype[np.void]]) +assert_type(V[(...,)], np.ndarray[tuple[()], np.dtype[np.void]]) + +None1 = (None,) +None2 = (None, None) +None3 = (None, None, None) +None4 = (None, None, None, None) + +assert_type(b[None], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None1], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None2], np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(b[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) +assert_type(b[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bool]]) + +assert_type(u8[None], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None1], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None2], np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(u8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.uint64]]) +assert_type(u8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.uint64]]) + +assert_type(i8[None], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None1], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None2], np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(i8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(i8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type(f8[None], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None1], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None2], np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(f8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(f8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.float64]]) + +assert_type(c8[None], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None1], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None2], np.ndarray[tuple[int, int], np.dtype[np.complex64]]) +assert_type(c8[None3], np.ndarray[tuple[int, int, 
int], np.dtype[np.complex64]]) +assert_type(c8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex64]]) + +assert_type(c16[None], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None1], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None2], np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(c16[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(c16[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]) + +assert_type(U[None], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None1], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None2], np.ndarray[tuple[int, int], np.dtype[np.str_]]) +assert_type(U[None3], np.ndarray[tuple[int, int, int], np.dtype[np.str_]]) +assert_type(U[None4], np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(S[None], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None1], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None2], np.ndarray[tuple[int, int], np.dtype[np.bytes_]]) +assert_type(S[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bytes_]]) +assert_type(S[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(V[None], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None1], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None2], np.ndarray[tuple[int, int], np.dtype[np.void]]) +assert_type(V[None3], np.ndarray[tuple[int, int, int], np.dtype[np.void]]) +assert_type(V[None4], np.ndarray[tuple[Any, ...], np.dtype[np.void]]) assert_type(V[0], Any) assert_type(V["field1"], Any) assert_type(V[["field1", "field2"]], np.void) @@ -110,17 +196,17 @@ assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) assert_type(b.reshape(()), np.bool) assert_type(i8.reshape([]), np.int64) -assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]]) -assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]]) -assert_type(u8.reshape(1, 1), 
np.ndarray[tuple[_1, _1], np.dtype[np.uint64]]) -assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]]) -assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]]) -assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]]) +assert_type(b.reshape(1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[int, int, int, int], np.dtype[np.str_]]) assert_type( S.reshape(1, 1, 1, 1, 1), np.ndarray[ # len(shape) >= 5 - tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]], + tuple[int, int, int, int, int, *tuple[int, ...]], np.dtype[np.bytes_], ], ) diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index e409a53bcef9..09c3d732c2cc 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import Any, Self, assert_type import numpy as np import numpy.typing as npt @@ -8,17 +8,44 @@ f8: np.float64 AR_b: npt.NDArray[np.bool] AR_i8: npt.NDArray[np.int64] +AR_i8_0d: np.ndarray[tuple[()], np.dtype[np.int64]] +AR_i8_1d: np.ndarray[tuple[int], np.dtype[np.int64]] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] AR_f8: npt.NDArray[np.float64] AR_LIKE_f8: list[float] +# Duck-typed class implementing _SupportsSplitOps protocol for testing +class _SplitableArray: + shape: tuple[int, ...] + ndim: int + def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... + def __getitem__(self, key: Any, /) -> Self: ... 
+ +splitable: _SplitableArray + assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) assert_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1), None) -assert_type(np.expand_dims(AR_i8, 2), npt.NDArray[np.int64]) -assert_type(np.expand_dims(AR_LIKE_f8, 2), npt.NDArray[Any]) +assert_type(np.expand_dims(AR_LIKE_f8, 0), np.ndarray) +assert_type(np.expand_dims(AR_i8, ()), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8, 0), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8, (0,)), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8, (0, 1)), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8_0d, ()), np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_0d, 0), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_0d, (0,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_0d, (0, 1)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, ()), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, 0), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, (0,)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, (0, 1)), np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, ()), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, 0), np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, (0,)), np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, (0, 1)), np.ndarray[tuple[int, int, int, int], np.dtype[np.int64]]) assert_type(np.column_stack([AR_i8]), npt.NDArray[np.int64]) assert_type(np.column_stack([AR_LIKE_f8]), npt.NDArray[Any]) @@ -28,18 +55,23 @@ assert_type(np.dstack([AR_LIKE_f8]), 
npt.NDArray[Any]) assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.array_split(splitable, 2), list[_SplitableArray]) assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.split(splitable, 2), list[_SplitableArray]) assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.hsplit(splitable, 2), list[_SplitableArray]) assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.vsplit(splitable, 2), list[_SplitableArray]) assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.dsplit(splitable, 2), list[_SplitableArray]) assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 18bd252d5ff9..eb441a6f61ab 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -1,43 +1,43 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] AR_T: AR_T_alias -assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) 
-assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.not_equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.not_equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.greater_equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.greater_equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.less_equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.less_equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_U, AR_U), np.ndarray) +assert_type(np.strings.greater(AR_S, AR_S), np.ndarray) +assert_type(np.strings.greater(AR_T, AR_T), np.ndarray) -assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool]) 
-assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_U, AR_U), np.ndarray) +assert_type(np.strings.less(AR_S, AR_S), np.ndarray) +assert_type(np.strings.less(AR_T, AR_T), np.ndarray) -assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_]) -assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_]) -assert_type(np.strings.add(AR_T, AR_T), AR_T_alias) +assert_type(np.strings.add(AR_U, AR_U), np.ndarray) +assert_type(np.strings.add(AR_S, AR_S), np.ndarray) +assert_type(np.strings.add(AR_T, AR_T), np.ndarray) assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) @@ -149,43 +149,43 @@ assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) -assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_U), np.ndarray) +assert_type(np.strings.isalpha(AR_S), np.ndarray) +assert_type(np.strings.isalpha(AR_T), np.ndarray) -assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_U), np.ndarray) +assert_type(np.strings.isalnum(AR_S), np.ndarray) +assert_type(np.strings.isalnum(AR_T), np.ndarray) -assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isdecimal(AR_U), np.ndarray) +assert_type(np.strings.isdecimal(AR_T), np.ndarray) -assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool]) 
-assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_U), np.ndarray) +assert_type(np.strings.isdigit(AR_S), np.ndarray) +assert_type(np.strings.isdigit(AR_T), np.ndarray) -assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_U), np.ndarray) +assert_type(np.strings.islower(AR_S), np.ndarray) +assert_type(np.strings.islower(AR_T), np.ndarray) -assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isnumeric(AR_U), np.ndarray) +assert_type(np.strings.isnumeric(AR_T), np.ndarray) -assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_U), np.ndarray) +assert_type(np.strings.isspace(AR_S), np.ndarray) +assert_type(np.strings.isspace(AR_T), np.ndarray) -assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.istitle(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_U), np.ndarray) +assert_type(np.strings.istitle(AR_S), np.ndarray) +assert_type(np.strings.istitle(AR_T), np.ndarray) -assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_U), np.ndarray) +assert_type(np.strings.isupper(AR_S), np.ndarray) +assert_type(np.strings.isupper(AR_T), np.ndarray) -assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_]) -assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_]) -assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) 
+assert_type(np.strings.str_len(AR_U), np.ndarray) +assert_type(np.strings.str_len(AR_S), np.ndarray) +assert_type(np.strings.str_len(AR_T), np.ndarray) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 34fbc5feeb41..0361f635a848 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -6,7 +6,7 @@ import unittest import warnings from collections.abc import Callable from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt @@ -16,7 +16,6 @@ AR_i8: npt.NDArray[np.int64] bool_obj: bool suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -FT = TypeVar("FT", bound=Callable[..., Any]) def func() -> int: ... @@ -66,7 +65,6 @@ with suppress_obj as c3: assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) -assert_type(np.testing.IS_PYPY, bool) assert_type(np.testing.HAS_REFCOUNT, bool) assert_type(np.testing.HAS_LAPACK64, bool) @@ -148,7 +146,7 @@ assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), class Test: ... 
-def decorate(a: FT) -> FT: +def decorate[FT: Callable[..., Any]](a: FT) -> FT: return a assert_type(np.testing.decorate_methods(Test, decorate), None) diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 30d4f408f1a9..d8c45afe44ab 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,15 +1,13 @@ -from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only +from typing import Any, assert_type, type_check_only import numpy as np import numpy.typing as npt -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _1D = tuple[int] +type _2D = tuple[int, int] +type _ND = tuple[Any, ...] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_ND: TypeAlias = tuple[Any, ...] - -_Indices2D: TypeAlias = tuple[ +type _Indices2D = tuple[ np.ndarray[_1D, np.dtype[np.intp]], np.ndarray[_1D, np.dtype[np.intp]], ] @@ -33,7 +31,7 @@ _to_1d_f64: list[float] _to_1d_c128: list[complex] @type_check_only -def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +def func1[ScalarT: np.generic](ar: npt.NDArray[ScalarT], a: int) -> npt.NDArray[ScalarT]: ... @type_check_only def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... 
@@ -110,7 +108,7 @@ assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) assert_type( np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -118,7 +116,7 @@ assert_type( assert_type( np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], @@ -126,7 +124,7 @@ assert_type( assert_type( np.histogram2d(_nd_i64, _nd_bool), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -134,7 +132,7 @@ assert_type( assert_type( np.histogram2d(_nd_f64, _nd_i64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -142,7 +140,7 @@ assert_type( assert_type( np.histogram2d(_nd_i64, _nd_f64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -150,7 +148,7 @@ assert_type( assert_type( np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128]], np.ndarray[_1D, np.dtype[np.complex128]], ], @@ -158,7 +156,7 @@ assert_type( assert_type( np.histogram2d(_nd_f64, _nd_c128, bins=8), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128]], np.ndarray[_1D, np.dtype[np.complex128]], ], @@ -166,7 +164,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), tuple[ - 
np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128]], np.ndarray[_1D, np.dtype[np.complex128]], ], @@ -174,7 +172,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.uint64]], np.ndarray[_1D, np.dtype[np.uint64]], ], @@ -182,7 +180,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.uint64]], np.ndarray[_1D, np.dtype[np.uint64]], ], @@ -190,7 +188,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], ], @@ -198,7 +196,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index df95da78ffb7..a1cc267b7494 100644 --- a/numpy/typing/tests/data/reveal/type_check.pyi +++ b/numpy/typing/tests/data/reveal/type_check.pyi @@ -3,10 +3,11 @@ from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt +i4: np.int32 f8: np.float64 -f: float +m8_ns: np.timedelta64[int] +M8_ns: np.datetime64[int] -# NOTE: Avoid importing the platform specific `np.float128` type AR_i8: npt.NDArray[np.int64] AR_i4: npt.NDArray[np.int32] AR_f2: npt.NDArray[np.float16] @@ -15,7 +16,15 @@ AR_f16: npt.NDArray[np.longdouble] AR_c8: npt.NDArray[np.complex64] AR_c16: 
npt.NDArray[np.complex128] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] +AR_c16_2d: np.ndarray[tuple[int, int], np.dtype[np.complex128]] + +AR_LIKE_b: list[bool] +AR_LIKE_i: list[int] AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] class ComplexObj: real: slice @@ -44,20 +53,38 @@ assert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool]) assert_type(np.iscomplexobj(f8), bool) assert_type(np.isrealobj(f8), bool) +assert_type(np.nan_to_num(True), np.bool) +assert_type(np.nan_to_num(0), np.int_ | Any) +assert_type(np.nan_to_num(0.0), np.float64 | Any) +assert_type(np.nan_to_num(0j), np.complex128 | Any) +assert_type(np.nan_to_num(i4), np.int32) assert_type(np.nan_to_num(f8), np.float64) -assert_type(np.nan_to_num(f, copy=True), Any) -assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) -assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) +assert_type(np.nan_to_num(m8_ns), np.timedelta64[int]) +assert_type(np.nan_to_num(M8_ns), np.datetime64[int]) +assert_type(np.nan_to_num(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.nan_to_num(AR_LIKE_i), npt.NDArray[np.int_]) +assert_type(np.nan_to_num(AR_LIKE_f), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_LIKE_c), npt.NDArray[np.complex128]) +assert_type(np.nan_to_num(AR_f8), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.nan_to_num(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.nan_to_num(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.nan_to_num(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.nan_to_num(AR_c16_2d), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) 
-assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) -assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.real_if_close(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.real_if_close(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64 | np.complex128]]) +assert_type(np.real_if_close(AR_c16_2d), np.ndarray[tuple[int, int], np.dtype[np.float64 | np.complex128]]) -assert_type(np.typename("h"), Literal["short"]) -assert_type(np.typename("B"), Literal["unsigned char"]) -assert_type(np.typename("V"), Literal["void"]) -assert_type(np.typename("S1"), Literal["character"]) +assert_type(np.typename("h"), Literal["short"]) # type: ignore[deprecated] +assert_type(np.typename("B"), Literal["unsigned char"]) # type: ignore[deprecated] +assert_type(np.typename("V"), Literal["void"]) # type: ignore[deprecated] +assert_type(np.typename("S1"), Literal["character"]) # type: ignore[deprecated] assert_type(np.common_type(AR_i4), type[np.float64]) assert_type(np.common_type(AR_f2), type[np.float16]) diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 77c27eb3b4ca..f205b82b4f75 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -5,18 +5,19 @@ from collections.abc import Callable from typing import Any, assert_type import numpy as np +from numpy._core._ufunc_config import _ErrDict def func(a: str, b: int) -> None: ... class Write: def write(self, value: str) -> None: ... 
-assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) -assert_type(np.geterr(), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(all=None), _ErrDict) +assert_type(np.seterr(divide="ignore"), _ErrDict) +assert_type(np.seterr(over="warn"), _ErrDict) +assert_type(np.seterr(under="call"), _ErrDict) +assert_type(np.seterr(invalid="raise"), _ErrDict) +assert_type(np.geterr(), _ErrDict) assert_type(np.setbufsize(4096), int) assert_type(np.getbufsize(), int) diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index 0e3157a1e54d..250686a98ee8 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -1,5 +1,4 @@ import os -import sys from pathlib import Path import pytest @@ -23,8 +22,6 @@ ROOT / "random" / "__init__.pyi", ROOT / "testing" / "__init__.pyi", ] -if sys.version_info < (3, 12): - FILES += [ROOT / "distutils" / "__init__.pyi"] @pytest.mark.thread_unsafe( diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 462fe4eabdc0..9db74c8ddc28 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -3,7 +3,8 @@ from typing import ( Any, NamedTuple, - Union, # pyright: ignore[reportDeprecated] + Self, + TypeAliasType, get_args, get_origin, get_type_hints, @@ -17,31 +18,23 @@ class TypeTup(NamedTuple): - typ: type - args: tuple[type, ...] - origin: type | None + typ: type # type expression + args: tuple[type, ...] # generic type parameters or arguments + origin: type | None # e.g. 
`UnionType` or `GenericAlias` + @classmethod + def from_type_alias(cls, alias: TypeAliasType, /) -> Self: + # PEP 695 `type _ = ...` aliases wrap the type expression as a + # `types.TypeAliasType` instance with a `__value__` attribute. + tp = alias.__value__ + return cls(typ=tp, args=get_args(tp), origin=get_origin(tp)) -def _flatten_type_alias(t: Any) -> Any: - # "flattens" a TypeAliasType to its underlying type alias - return getattr(t, "__value__", t) - - -NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup( - _flatten_type_alias(npt.ArrayLike), - _flatten_type_alias(npt.ArrayLike).__args__, - Union, - ), - "DTypeLike": TypeTup( - _flatten_type_alias(npt.DTypeLike), - _flatten_type_alias(npt.DTypeLike).__args__, - Union, - ), + "ArrayLike": TypeTup.from_type_alias(npt.ArrayLike), + "DTypeLike": TypeTup.from_type_alias(npt.DTypeLike), "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - "NDArray": NDArrayTup, + "NDArray": TypeTup.from_type_alias(npt.NDArray), } diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index ca4cf37fec3b..1c37bb8a1401 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -82,7 +82,7 @@ def run_mypy() -> None: """ if ( os.path.isdir(CACHE_DIR) - and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) # noqa: PLW1508 + and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) ): shutil.rmtree(CACHE_DIR) @@ -187,8 +187,10 @@ def test_reveal(path: str) -> None: pytest.fail(reasons, pytrace=False) +@pytest.mark.filterwarnings("ignore::DeprecationWarning") @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) def test_code_runs(path: str) -> None: """Validate that the code in `path` properly 
during runtime.""" diff --git a/pixi-packages/README.md b/pixi-packages/README.md new file mode 100644 index 000000000000..bd47c91bcc92 --- /dev/null +++ b/pixi-packages/README.md @@ -0,0 +1,109 @@ +# NumPy Pixi packages + +This directory contains definitions for [Pixi +packages](https://pixi.sh/latest/reference/pixi_manifest/#the-package-section) which can +be built from the NumPy source code. + +Downstream developers can make use of these packages by adding them as Git dependencies +in a [Pixi workspace](https://pixi.sh/latest/first_workspace/). + +This is particularly useful when developers need to build NumPy from source +(for example, for an ASan-instrumented build), as it does not require any manual +clone or build steps. Instead, Pixi will automatically handle both the build +and installation of the package. + +See [scipy#24066](https://github.com/scipy/scipy/pull/24066) for a full example of +downstream use. + +## Variants +Each package definition is contained in a subdirectory. +All package variants include debug symbols. + +Currently defined variants: + +### `default` +GIL-enabled build. + +Usage: +```toml +[dependencies] +python = "*" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/default" +``` +See `default/pixi.toml` if you wish to use python git tip instead. + +*Tip:* you may change the fork and add `numpy.rev = ""` to test unmerged +PRs. + +### `freethreading` +no-GIL build. + +Usage: +```toml +[dependencies] +python-freethreading = "*" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/freethreading" +``` +See `freethreading/pixi.toml` if you wish to use python git tip instead. + +### `asan` +ASan-instrumented build with `-Db_sanitize=address`.
+ +Usage: +```toml +[dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/asan" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/asan" +``` + +### `tsan-freethreading` +Freethreading TSan-instrumented build with `-Db_sanitize=thread`. + +Usage: +```toml +[dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/tsan-freethreading" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/tsan-freethreading" +``` + +## Maintenance + +- Keep host dependency requirements up to date + +## Troubleshooting + +TSan builds may crash on Linux with +``` +FATAL: ThreadSanitizer: unexpected memory mapping 0x7977bd072000-0x7977bd500000 +``` +To fix it, try reducing `mmap_rnd_bits`: + +```bash +$ sudo sysctl vm.mmap_rnd_bits +vm.mmap_rnd_bits = 32 # too high for TSan +$ sudo sysctl vm.mmap_rnd_bits=28 # reduce it +vm.mmap_rnd_bits = 28 +``` + +## Opportunities for future improvement + +- More package variants (such as UBSan) +- Support for Windows +- Using a single `pixi.toml` for all package variants is blocked on + [pixi#2813](https://github.com/prefix-dev/pixi/issues/2813) +- Consider pinning dependency versions to guard against upstream breakages over time + +## Known issues +- [numpy#30561](https://github.com/numpy/numpy/issues/30561): `default` and + `freethreading` recipes must be manually tweaked to compile against cpython git tip; + see `default/pixi.toml` and `freethreading/pixi.toml` for details. 
+- [pixi#5226](https://github.com/prefix-dev/pixi/issues/5226): lock file is invalidated + on all `pixi` invocations +- [rattler-build#2094](https://github.com/prefix-dev/rattler-build/issues/2094): pixi + 0.63.0 introduces a regression regarding the license file; please skip it diff --git a/pixi-packages/asan/LICENSE.txt b/pixi-packages/asan/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/asan/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml new file mode 100644 index 000000000000..afe9edc50cba --- /dev/null +++ b/pixi-packages/asan/pixi.toml @@ -0,0 +1,30 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] +preview = ["pixi-build"] + +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + +[package.build] +source.path = "../.." + +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +env.ASAN_OPTIONS = "detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" +extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=debug"] + +[package.host-dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/asan" +python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/default/LICENSE.txt b/pixi-packages/default/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/default/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/default/pixi.toml b/pixi-packages/default/pixi.toml new file mode 100644 index 000000000000..e3e201bece3e --- /dev/null +++ b/pixi-packages/default/pixi.toml @@ -0,0 +1,38 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] +preview = ["pixi-build"] + +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + +[package.build] +source.path = "../.." 
+ +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +extra-args = ["-Csetup-args=-Dbuildtype=debug"] + +[package.host-dependencies] +# FIXME https://github.com/numpy/numpy/issues/30478 +# python = "*" prevents downstream from building cpython from sources. +# Workaround: fork numpy, then uncomment one of the following and +# comment out the other. + +# Use latest cpython release from conda-forge +python = "*" + +# Use cpython git tip +# python.git = "https://github.com/python/cpython" +# python.subdirectory = "Tools/pixi-packages/default" +# python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/freethreading/LICENSE.txt b/pixi-packages/freethreading/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/freethreading/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/freethreading/pixi.toml b/pixi-packages/freethreading/pixi.toml new file mode 100644 index 000000000000..372fcbaa2c48 --- /dev/null +++ b/pixi-packages/freethreading/pixi.toml @@ -0,0 +1,38 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] +preview = ["pixi-build"] + +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + +[package.build] +source.path = "../.." + +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +extra-args = ["-Csetup-args=-Dbuildtype=debug"] + +[package.host-dependencies] +# FIXME https://github.com/numpy/numpy/issues/30478 +# python = "*" prevents downstream from building cpython from sources. +# Workaround: fork numpy, then uncomment one of the following and +# comment out the other. 
+ +# Use latest cpython release from conda-forge +python-freethreading = "*" + +# Use cpython git tip +# python.git = "https://github.com/python/cpython" +# python.subdirectory = "Tools/pixi-packages/freethreading" +# python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/tsan-freethreading/LICENSE.txt b/pixi-packages/tsan-freethreading/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/tsan-freethreading/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/tsan-freethreading/pixi.toml b/pixi-packages/tsan-freethreading/pixi.toml new file mode 100644 index 000000000000..8dab473f605d --- /dev/null +++ b/pixi-packages/tsan-freethreading/pixi.toml @@ -0,0 +1,32 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] +preview = ["pixi-build"] + +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + +[package.build] +source.path = "../.." 
+ +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +env.TSAN_OPTIONS = "halt_on_error=0:allocator_may_return_null=1" +extra-args = ["-Csetup-args=-Db_sanitize=thread", "-Csetup-args=-Dbuildtype=debug"] +# TODO: skip slow bytecode compilation step, see https://github.com/prefix-dev/pixi/pull/5737 +# skip-pyc-compilation = ["**/*.py"] + +[package.host-dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/tsan-freethreading" +python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pyproject.toml b/pyproject.toml index 0dba95b04e84..732ab2741993 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,13 +7,13 @@ requires = [ [project] name = "numpy" -version = "2.4.0.dev0" +version = "2.5.0.dev0" description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. 
Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.11" +requires-python = ">=3.12" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', @@ -22,7 +22,6 @@ classifiers = [ 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: 3.14', @@ -54,7 +53,7 @@ license-files = [ 'LICENSE.txt', # BSD-3-Clause 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD - 'numpy/_core/src/highway/LICENSE-BSD3', # BSD-3-Clause + 'numpy/_core/src/highway/LICENSE', # Dual-licensed: Apache 2.0 or BSD 3-Clause 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause @@ -174,6 +173,11 @@ tracker = "https://github.com/numpy/numpy/issues" name = "Performance improvements and changes" showcontent = true + [[tool.towncrier.type]] + directory = "typing" + name = "Typing improvements and changes" + showcontent = true + [[tool.towncrier.type]] directory = "change" name = "Changes" @@ -188,7 +192,7 @@ skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +enable = ["cpython-freethreading", "cpython-prerelease"] # The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) [tool.cibuildwheel.config-settings] @@ -247,6 +251,7 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:build", 
".spin/cmds.py:test", ".spin/cmds.py:mypy", + ".spin/cmds.py:pyrefly", ".spin/cmds.py:stubtest", ".spin/cmds.py:config_openblas", ".spin/cmds.py:lint", @@ -266,3 +271,29 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:check_tutorials", ] "Metrics" = [".spin/cmds.py:bench"] + + +[tool.pyrefly] +project-includes = ["numpy/**/*.pyi"] +project-excludes = ["numpy/typing/tests/**"] + +[tool.pyrefly.errors] +implicit-any = "error" +unannotated-parameter = "error" +unannotated-return = "error" + +[[tool.pyrefly.sub-config]] +matches = "numpy/__init__.pyi" +errors = { bad-override = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/_typing/_nbit_base.pyi" +errors = { invalid-inheritance = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/ma/core.pyi" +errors = { bad-override = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/matrixlib/defmatrix.pyi" +errors = { bad-override = "ignore" } diff --git a/pytest.ini b/pytest.ini index b8a1da2b4ec6..532095ab9aa7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -22,11 +22,8 @@ filterwarnings = ignore:The numpy.array_api submodule is still experimental. See NEP 47. 
# ignore matplotlib headless warning for pyplot ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning -# Ignore DeprecationWarnings from distutils - ignore::DeprecationWarning:.*distutils - ignore:\n\n `numpy.distutils`:DeprecationWarning # Ignore DeprecationWarning from typing.mypy_plugin ignore:`numpy.typing.mypy_plugin` is deprecated:DeprecationWarning # Ignore DeprecationWarning from struct module # see https://github.com/numpy/numpy/issues/28926 - ignore:Due to \'_pack_\', the \ No newline at end of file + ignore:Due to \'_pack_\', the diff --git a/requirements/all_requirements.txt b/requirements/all_requirements.txt index 2e457cb0bdbe..ad15045c15e2 100644 --- a/requirements/all_requirements.txt +++ b/requirements/all_requirements.txt @@ -3,4 +3,5 @@ -r linter_requirements.txt -r release_requirements.txt -r test_requirements.txt +-r typing_requirements.txt -r ci_requirements.txt diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 1f6eb1435cfc..18db99508a09 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.15 +spin build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 824934787e10..02ff529de09c 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin==0.15 +spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.30.0.7 +scipy-openblas32==0.3.31.188.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 37e685fef0cc..397fa703e28d 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.15 +spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.30.0.7 -scipy-openblas64==0.3.30.0.7 +scipy-openblas32==0.3.31.188.0 +scipy-openblas64==0.3.31.188.0 
diff --git a/requirements/delvewheel_requirements.txt b/requirements/delvewheel_requirements.txt new file mode 100644 index 000000000000..67a197c417a5 --- /dev/null +++ b/requirements/delvewheel_requirements.txt @@ -0,0 +1 @@ +delvewheel==1.12.0 ; sys_platform == 'win32' diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index b8f5cb2bd8fd..ea75103117a3 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,6 +1,6 @@ # doxygen required, use apt-get or dnf sphinx==7.2.6 -numpydoc==1.4 +numpydoc==1.10.0 pydata-sphinx-theme>=0.15.2 sphinx-copybutton sphinx-design diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 019a69da687a..71e736ceed90 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.81.1 -pytest==7.4.0 +hypothesis==6.151.9 +pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index da6bac6f7b84..5563a31fdcc9 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,4 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.14.0 +ruff==0.15.10 GitPython>=3.1.30 +spin diff --git a/requirements/pkgconf_requirements.txt b/requirements/pkgconf_requirements.txt new file mode 100644 index 000000000000..6d366c39a7f2 --- /dev/null +++ b/requirements/pkgconf_requirements.txt @@ -0,0 +1 @@ +pkgconf==2.5.1.post1 \ No newline at end of file diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index eaa092560d2d..55079d795ed9 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -1,16 +1,9 @@ # These packages are needed for a release in addition to those needed # for building, testing, and the creation of documentation. 
-# download-wheels.py -urllib3 -beautifulsoup4 - # changelog.py pygithub gitpython>=3.1.30 -# uploading wheels -twine - # uploading release documentation packaging diff --git a/requirements/setuptools_requirement.txt b/requirements/setuptools_requirement.txt deleted file mode 100644 index 21f900d46078..000000000000 --- a/requirements/setuptools_requirement.txt +++ /dev/null @@ -1,2 +0,0 @@ -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 33b5756b7362..6ade6d771052 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,19 +1,13 @@ Cython -wheel==0.38.1 -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' -hypothesis==6.142.2 -pytest==7.4.0 -pytest-cov==4.1.0 +hypothesis==6.151.9 +pytest==9.0.2 +pytest-cov==7.1.0 meson ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout -# For testing types. 
Notes on the restrictions: -# - Mypy relies on C API features not present in PyPy -# NOTE: Keep mypy in sync with environment.yml -mypy==1.18.2; platform_python_implementation != "PyPy" -typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer tzdata +# Ensure we install quaddtype in CI for some wheel setups (mac): +numpy_quaddtype ; python_version<"3.15" and sys_platform=="darwin" diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt new file mode 100644 index 000000000000..27ef828fbb94 --- /dev/null +++ b/requirements/typing_requirements.txt @@ -0,0 +1,6 @@ +# static typing requirements that are not needed for runtime tests + +-r test_requirements.txt + +mypy==1.20.0 +pyrefly==0.60.1 diff --git a/ruff.toml b/ruff.toml index b25a34d45984..5a9f9ea3a3c6 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,6 +1,5 @@ extend-exclude = [ "numpy/__config__.py", - "numpy/distutils", "numpy/typing/_char_codes.py", "spin/cmds.py", # Submodules. 
@@ -20,7 +19,7 @@ line-ending = "lf" [lint] preview = true -extend-select = [ +select = [ "B", # flake8-bugbear "C4", # flake8-comprehensions "ISC", # flake8-implicit-str-concat @@ -35,9 +34,12 @@ extend-select = [ "PERF", # perflint "E", # pycodestyle/error "W", # pycodestyle/warning + "F", # pyflakes "PGH", # pygrep-hooks "PLE", # pylint/error "UP", # pyupgrade + "RUF012", # ruff: mutable-class-default + "RUF100", # ruff: unused-noqa ] ignore = [ # flake8-bugbear @@ -63,7 +65,6 @@ ignore = [ "E302", # TODO: Expected 2 blank lines, found 1 "E402", # Module level import not at top of file "E712", # Avoid equality comparisons to `True` or `False` - "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check "E731", # Do not assign a `lambda` expression, use a `def` "E741", # Ambiguous variable name # pyflakes @@ -73,34 +74,17 @@ ignore = [ "F841", # Local variable is assigned to but never used # pyupgrade "UP015" , # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] -"_tempita.py" = ["B909"] -"bench_*.py" = ["B015", "B018"] -"test*.py" = ["B015", "B018", "E201", "E714"] +"_tempita.py" = ["B909", "RUF012"] +"bench_*.py" = ["B015", "B018", "RUF012"] +"test*.py" = ["B015", "B018", "E201", "E714", "RUF012"] -"numpy/_core/tests/test_arrayprint.py" = ["E501"] -"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] -"numpy/_core/tests/test_cpu_features.py" = ["E501"] -"numpy/_core/tests/test_dtype.py" = ["E501"] -"numpy/_core/tests/test_defchararray.py" = ["E501"] -"numpy/_core/tests/test_einsum.py" = ["E501"] -"numpy/_core/tests/test_multiarray.py" = ["E501"] -"numpy/_core/tests/test_nditer*py" = ["E501"] -"numpy/_core/tests/test_umath.py" = ["E501"] -"numpy/_core/tests/test_numerictypes.py" = ["E501"] -"numpy/_core/tests/test_regression.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] 
-"numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/lib/tests/test_format.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] -# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length -"numpy/_typing/_array_like.py" = ["E501"] -"numpy/_typing/_dtype_like.py" = ["E501"] "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream "numpy/typing/tests/data/*" = ["B015", "B018", "E501"] diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index 917b977dc195..76846c22f564 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -32,9 +32,9 @@ def wrap(self, source, outfile): for i, (c, t) in enumerate(HtmlFormatter.wrap(self, source, outfile)): as_functions = self.lines.get(i - 1, None) if as_functions is not None: - yield 0, ('

[%2d]' % - (quoteattr('as ' + ', '.join(as_functions)), - len(as_functions))) + title = quoteattr('as ' + ', '.join(as_functions)) + count = len(as_functions) + yield 0, f'
[{count:2}]' else: yield 0, ' ' yield c, t @@ -84,7 +84,7 @@ def get_file(self, path): if self.prefix is None: self.prefix = path else: - self.prefix = os.path.commonprefix([self.prefix, path]) + self.prefix = os.path.commonpath([self.prefix, path]) return self.files[path] def clean_path(self, path): @@ -107,9 +107,9 @@ def write_html(self, root): fd.write("") paths = sorted(self.files.keys()) for path in paths: - fd.write('

%s

' % - (self.clean_path(path), - escape(path[len(self.prefix):]))) + href = self.clean_path(path) + label = escape(path[len(self.prefix):]) + fd.write(f'

{label}

') fd.write("") diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 61bc49197d79..9f2fc0fc0ad3 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -46,8 +46,8 @@ def main(install_dir, tests_check): if tests_check == "--no-tests": if len(installed_test_files) > 0: - raise Exception("Test files aren't expected to be installed in %s" - ", found %s" % (INSTALLED_DIR, installed_test_files)) + raise Exception("Test files aren't expected to be installed in " + f"{INSTALLED_DIR}, found {installed_test_files}") print("----------- No test files were installed --------------") else: # Check test files detected in repo are installed @@ -84,11 +84,6 @@ def get_files(dir_to_check, kind='test'): relpath = os.path.relpath(path, dir_to_check) files[relpath] = path - if sys.version_info >= (3, 12): - files = { - k: v for k, v in files.items() if not k.startswith('distutils') - } - # ignore python files in vendored pythoncapi-compat submodule files = { k: v for k, v in files.items() if 'pythoncapi-compat' not in k diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt index 8370099015c5..207c6c67e4e9 100644 --- a/tools/ci/array-api-xfails.txt +++ b/tools/ci/array-api-xfails.txt @@ -41,6 +41,9 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_clip array_api_tests/test_signatures.py::test_extension_func_signature[fft.fftfreq] array_api_tests/test_signatures.py::test_extension_func_signature[fft.rfftfreq] +array_api_tests/test_fft.py::test_fftfreq +array_api_tests/test_fft.py::test_rfftfreq + # fails on np.repeat(np.array([]), np.array([])) test case array_api_tests/test_manipulation_functions.py::test_repeat @@ -63,3 +66,13 @@ array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] 
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] + +# complex plane special cases +array_api_tests/test_special_cases.py::test_unary[expm1((real(x_i) is +0 or real(x_i) == -0) and imag(x_i) is +0) -> 0 + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is +infinity and imag(x_i) is +0) -> +infinity + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is -infinity and imag(x_i) is +infinity) -> -1 + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is +infinity and imag(x_i) is +infinity) -> infinity + NaN j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is -infinity and imag(x_i) is NaN) -> -1 + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is +infinity and imag(x_i) is NaN) -> infinity + NaN j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is NaN and imag(x_i) is +0) -> NaN + 0j] +array_api_tests/test_special_cases.py::test_unary[tanh(real(x_i) is +infinity and isfinite(imag(x_i)) and imag(x_i) > 0) -> 1 + 0j] diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 81a342f20e4e..977921d8236d 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -9,7 +9,7 @@ modified_clone: &MODIFIED_CLONE # it's a PR so clone the main branch then merge the changes from the PR git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR - + # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time # However, if you do a PR against a maintenance branch we will want to # merge the PR into the maintenance branch, not main @@ -25,14 +25,14 @@ freebsd_test_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 
'true' compute_engine_instance: image_project: freebsd-org-cloud-dev - image: family/freebsd-14-2 + image: family/freebsd-14-3 platform: freebsd cpu: 1 memory: 4G install_devtools_script: | pkg install -y git bash ninja ccache blas cblas lapack pkgconf - pkg install -y python311 + pkg install -y python312 <<: *MODIFIED_CLONE @@ -45,22 +45,22 @@ freebsd_test_task: prepare_env_script: | # Create a venv (the `source` command needs bash, not the default sh shell) chsh -s /usr/local/bin/bash - python3.11 -m venv .venv + python3.12 -m venv .venv source .venv/bin/activate # Minimal build and test requirements - python3.11 -m pip install -U pip - python3.11 -m pip install meson-python Cython pytest hypothesis + python3.12 -m pip install -U pip + python3.12 -m pip install meson-python Cython pytest hypothesis build_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate - python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" + python3.12 -m pip install . 
--no-build-isolation -v -Csetup-args="-Dallow-noblas=false" test_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate cd tools - python3.11 -m pytest --pyargs numpy -m "not slow" + python3.12 -m pytest --pyargs numpy -m "not slow" ccache -s on_failure: diff --git a/tools/ci/lsan_suppressions.txt b/tools/ci/lsan_suppressions.txt new file mode 100644 index 000000000000..74e2b335f575 --- /dev/null +++ b/tools/ci/lsan_suppressions.txt @@ -0,0 +1,33 @@ +# This file contains suppressions for the LSAN tool +# +# Reference: https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer#suppressions + +# 2 leaks when importing "numpy.exceptions" in initialize_static_globals +# (check the duplicate frame number for the second leak) +#0 0xffffb64476d0 in malloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cpp:69 +#1 0xffffb598e8d0 in PyFloat_FromDouble Objects/floatobject.c:128 +#2 0xffffb5dac0fc in fill_time Modules/posixmodule.c:2622 +#3 0xffffb5dc137c in _pystat_fromstructstat Modules/posixmodule.c:2740 +#3 0xffffb5dc13b4 in _pystat_fromstructstat Modules/posixmodule.c:2743 +#4 0xffffb5dc2d2c in posix_do_stat Modules/posixmodule.c:2868 +#5 0xffffb5dc331c in os_stat_impl Modules/posixmodule.c:3235 +#6 0xffffb5dc331c in os_stat Modules/clinic/posixmodule.c.h:105 +#7 0xffffb58484d8 in _PyEval_EvalFrameDefault Python/generated_cases.c.h:2383 +#8 0xffffb5c18174 in _PyEval_EvalFrame Include/internal/pycore_ceval.h:121 +#9 0xffffb5c18174 in _PyEval_Vector Python/ceval.c:2083 +#10 0xffffb593ccf4 in _PyObject_VectorcallTstate Include/internal/pycore_call.h:169 +#11 0xffffb593ccf4 in object_vacall Objects/call.c:819 +#12 0xffffb593d168 in PyObject_CallMethodObjArgs Objects/call.c:880 +#13 0xffffb5cbc6f0 in import_find_and_load Python/import.c:3737 +#14 0xffffb5cbc6f0 in PyImport_ImportModuleLevelObject Python/import.c:3819 +#15 0xffffb5bffca0 in builtin___import___impl Python/bltinmodule.c:285 +#16 0xffffb5bffca0 in builtin___import__ 
Python/clinic/bltinmodule.c.h:110 +#17 0xffffb593db9c in _PyObject_VectorcallTstate Include/internal/pycore_call.h:169 +#18 0xffffb593db9c in _PyObject_CallFunctionVa Objects/call.c:552 +#19 0xffffb593df38 in PyObject_CallFunction Objects/call.c:574 +#20 0xffffb5cbdb5c in PyImport_Import Python/import.c:4011 +#21 0xffffb5cbe17c in PyImport_ImportModule Python/import.c:3434 +#22 0xffffb1c61b44 in npy_import ../numpy/_core/src/common/npy_import.h:71 +#23 0xffffb1c61b44 in initialize_static_globals ../numpy/_core/src/multiarray/npy_static_data.c:124 + +leak:initialize_static_globals diff --git a/tools/download-wheels.py b/tools/download-wheels.py deleted file mode 100644 index 38a8360f0437..000000000000 --- a/tools/download-wheels.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 -""" -Script to download NumPy wheels from the Anaconda staging area. - -Usage:: - - $ ./tools/download-wheels.py -w - -The default wheelhouse is ``release/installers``. - -Dependencies ------------- - -- beautifulsoup4 -- urllib3 - -Examples --------- - -While in the repository root:: - - $ python tools/download-wheels.py 1.19.0 - $ python tools/download-wheels.py 1.19.0 -w ~/wheelhouse - -""" -import argparse -import os -import re -import shutil - -import urllib3 -from bs4 import BeautifulSoup - -__version__ = "0.2" - -# Edit these for other projects. - -# The first URL is used to get the file names as it avoids the need for paging -# when the number of files exceeds the page length. Note that files/page is not -# stable and can change when the page layout changes. The second URL is used to -# retrieve the files themselves. This workaround is copied from SciPy. -NAMES_URL = "https://pypi.anaconda.org/multibuild-wheels-staging/simple/numpy/" -FILES_URL = "https://anaconda.org/multibuild-wheels-staging/numpy" - -# Name prefix of the files to download. -PREFIX = "numpy" - -# Name endings of the files to download. 
-WHL = r"-.*\.whl$" -ZIP = r"\.zip$" -GZIP = r"\.tar\.gz$" -SUFFIX = rf"({WHL}|{GZIP}|{ZIP})" - - -def get_wheel_names(version): - """ Get wheel names from Anaconda HTML directory. - - This looks in the Anaconda multibuild-wheels-staging page and - parses the HTML to get all the wheel names for a release version. - - Parameters - ---------- - version : str - The release version. For instance, "1.18.3". - - """ - http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") - tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - index_url = f"{NAMES_URL}" - index_html = http.request('GET', index_url) - soup = BeautifulSoup(index_html.data, 'html.parser') - return sorted(soup.find_all(string=tmpl)) - - -def download_wheels(version, wheelhouse, test=False): - """Download release wheels. - - The release wheels for the given NumPy version are downloaded - into the given directory. - - Parameters - ---------- - version : str - The release version. For instance, "1.18.3". - wheelhouse : str - Directory in which to download the wheels. 
- - """ - http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") - wheel_names = get_wheel_names(version) - - for i, wheel_name in enumerate(wheel_names): - wheel_url = f"{FILES_URL}/{version}/download/{wheel_name}" - wheel_path = os.path.join(wheelhouse, wheel_name) - with open(wheel_path, "wb") as f: - with http.request("GET", wheel_url, preload_content=False,) as r: - info = r.info() - length = int(info.get('Content-Length', '0')) - if length == 0: - length = 'unknown size' - else: - length = f"{(length / 1024 / 1024):.2f}MB" - print(f"{i + 1:<4}{wheel_name} {length}") - if not test: - shutil.copyfileobj(r, f) - print(f"\nTotal files downloaded: {len(wheel_names)}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "version", - help="NumPy version to download.") - parser.add_argument( - "-w", "--wheelhouse", - default=os.path.join(os.getcwd(), "release", "installers"), - help="Directory in which to store downloaded wheels\n" - "[defaults to /release/installers]") - parser.add_argument( - "-t", "--test", - action='store_true', - help="only list available wheels, do not download") - - args = parser.parse_args() - - wheelhouse = os.path.expanduser(args.wheelhouse) - if not os.path.isdir(wheelhouse): - raise RuntimeError( - f"{wheelhouse} wheelhouse directory is not present." 
- " Perhaps you need to use the '-w' flag to specify one.") - - download_wheels(args.version, wheelhouse, test=args.test) diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py index 8149a0106575..9362c1478bd0 100755 --- a/tools/functions_missing_types.py +++ b/tools/functions_missing_types.py @@ -33,11 +33,9 @@ # Accidentally public, deprecated, or shouldn't be used "Tester", "_core", - "get_array_wrap", "int_asbuffer", "numarray", "oldnumeric", - "safe_eval", "test", "typeDict", # Builtins diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py new file mode 100644 index 000000000000..f1c52913a9c5 --- /dev/null +++ b/tools/pyright_completeness.py @@ -0,0 +1,77 @@ +""" +Run PyRight's `--verifytypes` and check that its reported type completeness is above +a minimum threshold. + +Requires `basedpyright` to be installed in the environment. + +Example usage: + + spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal \ + --exclude-like '*.tests.*' '*.conftest.*' + +We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib +`numbers` module, see https://github.com/microsoft/pyright/discussions/9911. 
+""" + +import argparse +import fnmatch +import json +import subprocess +import sys +from collections.abc import Sequence + + +def main(argv: Sequence[str] | None = None) -> int: + parser = argparse.ArgumentParser() + parser.add_argument( + "--exclude-like", + required=False, + nargs="*", + type=str, + help="Exclude symbols whose names matches this glob pattern", + ) + args, unknownargs = parser.parse_known_args(argv) + pyright_args = list(unknownargs) + if "--outputjson" not in pyright_args: + pyright_args.append("--outputjson") + return run_pyright_with_coverage(pyright_args, args.exclude_like) + + +def run_pyright_with_coverage( + pyright_args: list[str], + exclude_like: Sequence[str], +) -> int: + result = subprocess.run( + ["basedpyright", *pyright_args], + capture_output=True, + text=True, + ) + + try: + data = json.loads(result.stdout) + except json.decoder.JSONDecodeError: + sys.stdout.write(result.stdout) + sys.stderr.write(result.stderr) + return 1 + + if exclude_like: + symbols = data["typeCompleteness"]["symbols"] + matched_symbols = [ + x + for x in symbols + if not any(fnmatch.fnmatch(x["name"], pattern) for pattern in exclude_like) + and x["isExported"] + ] + covered = sum(x["isTypeKnown"] for x in matched_symbols) / len(matched_symbols) + else: + covered = data["typeCompleteness"]["completenessScore"] + sys.stderr.write(result.stderr) + if covered < 1: + sys.stdout.write(f"Coverage {covered:.1%} is below minimum required 100%\n") + return 1 + sys.stdout.write("Coverage is at 100%\n") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tools/refguide_check.py b/tools/refguide_check.py index da881574215f..6b79ba037e2c 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -460,7 +460,7 @@ def resolve(name, is_label=False): if not success: output += " " + "-" * 72 + "\n" for lineno, line in enumerate(text.splitlines()): - output += " %-4d %s\n" % (lineno + 1, line) + output += f" {lineno + 1:<4} {line}\n" output += 
" " + "-" * 72 + "\n\n" if dots: @@ -520,10 +520,10 @@ def check_rest(module, names, dots=True): traceback.format_exc())) continue - m = re.search("([\x00-\x09\x0b-\x1f])", text) # noqa: RUF039 + m = re.search("([\x00-\x09\x0b-\x1f])", text) if m: - msg = ("Docstring contains a non-printable character %r! " - "Maybe forgot r\"\"\"?" % (m.group(1),)) + msg = (f"Docstring contains a non-printable character {m.group(1)!r}! " + "Maybe forgot r\"\"\"?") results.append((full_name, False, msg)) continue diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 30f4cd120cc9..e113dac7dc47 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -7,16 +7,12 @@ numpy\.conftest.* numpy\.random\._generator\.__test__ numpy(\.\w+)?\.tests.* -# system-dependent extended precision types -numpy(\..+)?\.float(96|128) -numpy(\..+)?\.complex(192|256) - # system-dependent SIMD constants numpy\._core\._simd\.\w+ # these are always either float96/complex192 or float128/complex256 numpy\.__all__ -numpy\._?core\.__all__ +numpy\._core\.__all__ numpy\._?core\.numeric\.__all__ numpy\._?core\.numerictypes\.__all__ numpy\.matlib\.__all__ @@ -39,155 +35,8 @@ numpy\.(\w+\.)*integer\.bit_count numpy\.(\w+\.)*floating\.as_integer_ratio numpy\.(\w+\.)*floating\.is_integer numpy\.(\w+\.)*complexfloating\.__complex__ +# https://github.com/numpy/numpy/issues/30445#issuecomment-3665484402 +numpy\.(\w+\.)*generic\.__hash__ # intentionally missing deprecated module stubs -numpy\.core\._dtype -numpy\.core\._dtype_ctypes -numpy\.core\._internal -numpy\.core\._multiarray_umath.* -numpy\.core\.arrayprint.* -numpy\.core\.defchararray.* -numpy\.core\.einsumfunc.* -numpy\.core\.fromnumeric.* -numpy\.core\.function_base.* -numpy\.core\.getlimits.* -numpy\.core\.multiarray.* -numpy\.core\.numeric.* -numpy\.core\.overrides -numpy\.core\.records.* -numpy\.core\.shape_base.* -numpy\.core\.umath.* numpy\.typing\.mypy_plugin - -# ufuncs, see 
https://github.com/python/mypy/issues/20223 -numpy\.(\w+\.)*abs -numpy\.(\w+\.)*absolute -numpy\.(\w+\.)*acos -numpy\.(\w+\.)*acosh -numpy\.(\w+\.)*add -numpy\.(\w+\.)*arccos -numpy\.(\w+\.)*arccosh -numpy\.(\w+\.)*arcsin -numpy\.(\w+\.)*arcsinh -numpy\.(\w+\.)*arctan -numpy\.(\w+\.)*arctan2 -numpy\.(\w+\.)*arctanh -numpy\.(\w+\.)*asin -numpy\.(\w+\.)*asinh -numpy\.(\w+\.)*atan -numpy\.(\w+\.)*atan2 -numpy\.(\w+\.)*atanh -numpy\.(\w+\.)*bitwise_and -numpy\.(\w+\.)*bitwise_count -numpy\.(\w+\.)*bitwise_invert -numpy\.(\w+\.)*bitwise_left_shift -numpy\.(\w+\.)*bitwise_not -numpy\.(\w+\.)*bitwise_or -numpy\.(\w+\.)*bitwise_right_shift -numpy\.(\w+\.)*bitwise_xor -numpy\.(\w+\.)*cbrt -numpy\.(\w+\.)*ceil -numpy\.(\w+\.)*conj -numpy\.(\w+\.)*conjugate -numpy\.(\w+\.)*copysign -numpy\.(\w+\.)*cos -numpy\.(\w+\.)*cosh -numpy\.(\w+\.)*deg2rad -numpy\.(\w+\.)*degrees -numpy\.(\w+\.)*divide -numpy\.(\w+\.)*divmod -numpy\.(\w+\.)*equal -numpy\.(\w+\.)*exp -numpy\.(\w+\.)*exp2 -numpy\.(\w+\.)*expm1 -numpy\.(\w+\.)*fabs -numpy\.(\w+\.)*float_power -numpy\.(\w+\.)*floor -numpy\.(\w+\.)*floor_divide -numpy\.(\w+\.)*fmax -numpy\.(\w+\.)*fmin -numpy\.(\w+\.)*fmod -numpy\.(\w+\.)*frexp -numpy\.(\w+\.)*gcd -numpy\.(\w+\.)*greater -numpy\.(\w+\.)*greater_equal -numpy\.(\w+\.)*heaviside -numpy\.(\w+\.)*hypot -numpy\.(\w+\.)*invert -numpy\.(\w+\.)*isfinite -numpy\.(\w+\.)*isinf -numpy\.(\w+\.)*isnan -numpy\.(\w+\.)*isnat -numpy\.(\w+\.)*lcm -numpy\.(\w+\.)*ldexp -numpy\.(\w+\.)*left_shift -numpy\.(\w+\.)*less -numpy\.(\w+\.)*less_equal -numpy\.(\w+\.)*log -numpy\.(\w+\.)*log10 -numpy\.(\w+\.)*log1p -numpy\.(\w+\.)*log2 -numpy\.(\w+\.)*logaddexp -numpy\.(\w+\.)*logaddexp2 -numpy\.(\w+\.)*logical_and -numpy\.(\w+\.)*logical_not -numpy\.(\w+\.)*logical_or -numpy\.(\w+\.)*logical_xor -numpy\.(\w+\.)*matmul -numpy\.(\w+\.)*matvec -numpy\.(\w+\.)*maximum -numpy\.(\w+\.)*minimum -numpy\.(\w+\.)*mod -numpy\.(\w+\.)*modf -numpy\.(\w+\.)*multiply -numpy\.(\w+\.)*negative -numpy\.(\w+\.)*nextafter 
-numpy\.(\w+\.)*not_equal -numpy\.(\w+\.)*positive -numpy\.(\w+\.)*pow -numpy\.(\w+\.)*power -numpy\.(\w+\.)*rad2deg -numpy\.(\w+\.)*radians -numpy\.(\w+\.)*reciprocal -numpy\.(\w+\.)*remainder -numpy\.(\w+\.)*right_shift -numpy\.(\w+\.)*rint -numpy\.(\w+\.)*sign -numpy\.(\w+\.)*signbit -numpy\.(\w+\.)*sin -numpy\.(\w+\.)*sinh -numpy\.(\w+\.)*spacing -numpy\.(\w+\.)*sqrt -numpy\.(\w+\.)*square -numpy\.(\w+\.)*subtract -numpy\.(\w+\.)*tan -numpy\.(\w+\.)*tanh -numpy\.(\w+\.)*true_divide -numpy\.(\w+\.)*trunc -numpy\.(\w+\.)*vecdot -numpy\.(\w+\.)*vecmat -numpy\.(\w+\.)*isalnum -numpy\.(\w+\.)*isalpha -numpy\.(\w+\.)*isdecimal -numpy\.(\w+\.)*isdigit -numpy\.(\w+\.)*islower -numpy\.(\w+\.)*isnumeric -numpy\.(\w+\.)*isspace -numpy\.(\w+\.)*istitle -numpy\.(\w+\.)*isupper -numpy\.(\w+\.)*str_len -numpy\._core\._methods\.umr_bitwise_count -numpy\._core\._umath_tests\.always_error -numpy\._core\._umath_tests\.always_error_gufunc -numpy\._core\._umath_tests\.always_error_unary -numpy\._core\._umath_tests\.conv1d_full -numpy\._core\._umath_tests\.cross1d -numpy\._core\._umath_tests\.euclidean_pdist -numpy\._core\._umath_tests\.indexed_negative -numpy\._core\._umath_tests\.inner1d -numpy\._core\._umath_tests\.inner1d_no_doc -numpy\._core\._umath_tests\.matrix_multiply -numpy\.linalg\._umath_linalg\.qr_complete -numpy\.linalg\._umath_linalg\.qr_reduced -numpy\.linalg\._umath_linalg\.solve -numpy\.linalg\._umath_linalg\.solve1 diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt deleted file mode 100644 index 4413f164f582..000000000000 --- a/tools/stubtest/allowlist_py311.txt +++ /dev/null @@ -1,3 +0,0 @@ -# python == 3.11.* - -numpy\.distutils\..* diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt deleted file mode 100644 index 867b2f1870a3..000000000000 --- a/tools/stubtest/allowlist_py312.txt +++ /dev/null @@ -1,8 +0,0 @@ -# python >= 3.12 - -# false positive "... 
is not a Union" errors -numpy\.typing\.ArrayLike -numpy\.typing\.DTypeLike - -# only exists before Python 3.12 -numpy\.f2py\._backends\._distutils diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index ded91d7d56f3..4c75171acffe 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -5,12 +5,11 @@ exclude = (?x)( .+\.py$ | _build_utils/ | _core/code_generators/ - | distutils/ ) ) namespace_packages = False -enable_error_code = ignore-without-code, redundant-expr, truthy-bool +enable_error_code = deprecated, ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False strict = True strict_bytes = True diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i index 747446648c8b..a7392abb20d9 100644 --- a/tools/swig/numpy.i +++ b/tools/swig/numpy.i @@ -40,6 +40,7 @@ #define NO_IMPORT_ARRAY #endif #include "stdio.h" +#include #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include %} @@ -457,11 +458,11 @@ void free_cap(PyObject * cap) { for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); + snprintf(s, sizeof(s), "%d, ", exact_dimensions[i]); + strncat(dims_str, s, sizeof(dims_str) - strlen(dims_str) - 1); } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); + snprintf(s, sizeof(s), " or %d", exact_dimensions[n-1]); + strncat(dims_str, s, sizeof(dims_str) - strlen(dims_str) - 1); PyErr_Format(PyExc_TypeError, "Array must have %s dimensions. 
Given array has %d dimensions", dims_str, @@ -497,20 +498,20 @@ void free_cap(PyObject * cap) { if (size[i] == -1) { - sprintf(s, "*,"); + snprintf(s, sizeof(s), "*,"); } else { - sprintf(s, "%ld,", (long int)size[i]); + snprintf(s, sizeof(s), "%ld,", (long int)size[i]); } - strcat(desired_dims,s); + strncat(desired_dims, s, sizeof(desired_dims) - strlen(desired_dims) - 1); } len = strlen(desired_dims); desired_dims[len-1] = ']'; for (i = 0; i < n; i++) { - sprintf(s, "%ld,", (long int)array_size(ary,i)); - strcat(actual_dims,s); + snprintf(s, sizeof(s), "%ld,", (long int)array_size(ary,i)); + strncat(actual_dims, s, sizeof(actual_dims) - strlen(actual_dims) - 1); } len = strlen(actual_dims); actual_dims[len-1] = ']'; diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index 75bf99c054cb..3798029dbe4b 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -2,6 +2,7 @@ import os import sys import unittest + from distutils.util import get_platform import numpy as np diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 381c329a5372..5a8e69f04cdf 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -48,6 +48,8 @@ EOF fi if [[ $RUNNER_OS == "Windows" ]]; then - # delvewheel is the equivalent of delocate/auditwheel for windows. - python -m pip install delvewheel wheel + python -m pip install -r $PROJECT_DIR/requirements/delvewheel_requirements.txt + # pkgconf - carries out the role of pkg-config. + # Alternative is pkgconfiglite that you have to install with choco + python -m pip install -r $PROJECT_DIR/requirements/pkgconf_requirements.txt fi diff --git a/vendored-meson/meson b/vendored-meson/meson index e72c717199fa..5d5a3d478da1 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit e72c717199fa18d34020c7c97f9de3f388c5e055 +Subproject commit 5d5a3d478da115c812be77afa651db2492d52171