Compare commits

..

13 Commits

Author SHA1 Message Date
Pauli Virtanen 14142ff70d REL: set version to 1.1.0 2018-05-05 16:07:41 +02:00
Pauli Virtanen 97722e1db1 DOC: update 1.1.0 release notes 2018-05-04 21:32:24 +02:00
Pauli Virtanen a6b432607d CI: run tests with pypy3 on circleci (#8783)
* MAINT: make parallelism in tools/cythonize.py adjustable

* CI: run tests with pypy3 on circleci

(cherry picked from commit bdfce64402)
2018-05-04 21:25:21 +02:00
Ralf Gommers c345647dfe BLD: remove special/tests/data/local/ from sdist. (#8784)
Same as for the boost and gsl subdirectories.

(cherry picked from commit f0a5bc5153)
2018-05-04 21:21:09 +02:00
Pauli Virtanen 2e99d5b85c Merge pull request #8742 from bagibence/fix_8686
BUG: optimize: fix division by zero in trust-region optimization methods

Check convergence condition before starting iteration. Solves problem
with division by zero if we have a Jacobian with zero magnitude.
Add corresponding tests.

(cherry picked from commit fc09f2f42e)
2018-05-04 21:20:08 +02:00
Pauli Virtanen 7c1ba64d3a REL: set version to 1.1.0rc2.dev0 2018-05-04 21:18:34 +02:00
Pauli Virtanen 4b50fdea8e REL: set version to 1.1.0rc1 2018-04-15 15:20:45 +02:00
Pauli Virtanen 248e1ff705 BUG: signal: restore backward-compatible import for hanning
Ensure scipy.signal.hanning remains available.

(cherry picked from commit 6da830e3bf)
2018-04-15 14:39:24 +02:00
Pauli Virtanen 7789b6e851
DOC: update 1.1.0 release notes (#8730) 2018-04-15 12:36:41 +00:00
Pauli Virtanen 235932313d MAINT: remove pyproject.toml
The implementation of build isolation in pip 10.0.0 does not work with
the present pyproject.toml because of missing features.
2018-04-15 14:07:40 +02:00
Pauli Virtanen a6900fe88c CI: don't use pyproject.toml in CI builds + fix py35 travis (#8724)
Pass on --no-build-isolation to pip, so that it does not break the numpy
version fixing etc. careful environment setup.

(cherry picked from commit e665e3c0a0)
2018-04-14 20:39:39 +02:00
P. L. Lim 5b297b7622 MAINT: stats: fix np.testing incompatible with numpy 1.15 (#8720)
setastest is not required for pytest, so remove it.

(cherry picked from commit 394b4719d7)
2018-04-13 23:34:09 +02:00
Pauli Virtanen 9f479a5ee1 TST: skip a problematic linalg test for 1.1.x
See gh-7500 and gh-8064.

(cherry picked from commit 23b3fe6721)
2018-04-12 21:33:01 +02:00
3602 changed files with 428964 additions and 781817 deletions

View File

@ -1,64 +1,15 @@
version: 2.1
# Aliases to reuse
_defaults: &defaults
docker:
# CircleCI maintains a library of pre-built images
# documented at https://circleci.com/developer/images/image/cimg/python
- image: cimg/python:3.13.10
working_directory: ~/repo
commands:
check-skip:
steps:
- run:
name: Check-skip
command: |
if [ ! -d "scipy" ]; then
echo "Build was not run due to skip, exiting job ${CIRCLE_JOB} for PR ${CIRCLE_PULL_REQUEST}."
circleci-agent step halt;
fi
export git_log=$(git log --max-count=1 --pretty=format:"%B" | tr "\n" " ")
echo "Got commit message:"
echo "${git_log}"
if [[ -v CIRCLE_PULL_REQUEST ]] && ([[ "$git_log" == *"[skip circle]"* ]] || [[ "$git_log" == *"[circle skip]"* ]] || [[ "$git_log" == *"[lint only]"* ]]); then
echo "Skip detected, exiting job ${CIRCLE_JOB} for PR ${CIRCLE_PULL_REQUEST}."
circleci-agent step halt;
fi
apt-install:
steps:
- run:
name: Install apt packages
command: |
sudo apt-get update
sudo apt-get install libopenblas-dev gfortran libgmp-dev libmpfr-dev ccache
merge:
steps:
- run:
name: merge with upstream
command: |
echo $(git log -1 --pretty=%B) | tee gitlog.txt
echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt
if [[ $(cat merge.txt) != "" ]]; then
echo "Merging $(cat merge.txt)";
git remote add upstream https://github.com/scipy/scipy.git;
git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge";
git fetch upstream main;
fi
version: 2
jobs:
# Build SciPy from source
build_scipy:
<<: *defaults
build_docs:
docker:
# CircleCI maintains a library of pre-built images
# documented at https://circleci.com/docs/2.0/circleci-images/
- image: circleci/python:3.6.1
working_directory: ~/repo
steps:
- checkout
- check-skip
- merge
- apt-install
- run:
name: update submodules
@ -66,149 +17,58 @@ jobs:
git submodule init
git submodule update
- restore_cache:
keys:
- deps_ccache-{{ .Branch }}
- deps_ccache
- run:
name: install Debian dependencies
command: |
sudo apt-get update
sudo apt-get install libatlas-dev libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libfreetype6-dev libpng-dev zlib1g-dev texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
- run:
name: setup Python venv
command: |
pip install numpy cython pybind11 pythran ninja meson
pip install -r requirements/doc.txt
# asv upper bound due to issue in gh-23611, remove when that is resolved
pip install mpmath gmpy2 "asv<0.6.5" pooch threadpoolctl spin marray
# extra benchmark deps
pip install pyfftw cffi pytest
- run:
name: build SciPy
command: |
spin build -j2
- save_cache:
key: deps_ccache-{{ .Branch }}
paths:
- ~/.ccache
- ~/.cache/pip
- run:
name: ccache performance
command: |
ccache -s
- persist_to_workspace:
root: ~/
paths:
- .
# Build docs
build_docs:
<<: *defaults
steps:
- attach_workspace:
at: ~/
- check-skip
- apt-install
python3 -m venv venv
. venv/bin/activate
pip install --install-option="--no-cython-compile" Cython==0.25
pip install numpy
pip install nose mpmath argparse Pillow codecov matplotlib Sphinx==1.7.2
- run:
name: build docs
no_output_timeout: 25m
command: |
export PYTHONPATH=$PWD/build-install/usr/lib/python3.13/site-packages
export SCIPY_ARRAY_API=1
spin docs -j2 2>&1 | tee sphinx_log.txt
- run:
name: Check sphinx log for warnings (which are treated as errors)
when: always
command: |
! grep "^.* WARNING: .*$" sphinx_log.txt
. venv/bin/activate
export SHELL=$(which bash)
python -u runtests.py -g --shell -- -c 'make -C doc PYTHON=python html-scipyorg latex'
make -C doc/build/latex all-pdf LATEXOPTS="-file-line-error -halt-on-error"
cp -f doc/build/latex/scipy-ref.pdf doc/build/html-scipyorg/
- store_artifacts:
path: doc/build/html
destination: html
path: doc/build/html-scipyorg
destination: html-scipyorg
- persist_to_workspace:
root: doc/build
paths:
- html
- html-scipyorg
# Run benchmarks
run_benchmarks:
<<: *defaults
steps:
- attach_workspace:
at: ~/
- check-skip
- apt-install
- run:
name: run asv
no_output_timeout: 30m
command: |
export PYTHONPATH=$PWD/build-install/usr/lib/python3.13/site-packages
cd benchmarks
asv machine --machine CircleCI
export SCIPY_GLOBAL_BENCH_NUMTRIALS=1
export SCIPY_ALLOW_BENCH_IMPORT_ERRORS=0
export OPENBLAS_NUM_THREADS=1
time asv --config asv.conf.json run -m CircleCI --quick --python=same --bench '^((?!BenchGlobal|QuadraticAssignment).)*$'
asv --config asv.conf.json publish
- store_artifacts:
path: benchmarks/html
destination: html-benchmarks
# Reference guide checking
refguide_check:
<<: *defaults
steps:
- attach_workspace:
at: ~/
- check-skip
- apt-install
- run:
name: refguide_check
no_output_timeout: 25m
command: |
sudo apt-get install -y wamerican-small
export PYTHONPATH=$PWD/build-install/usr/lib/python3.13/site-packages
spin refguide-check
- run:
name: smoke-docs
no_output_timeout: 35m
command: |
pip install matplotlib hypothesis
pip install scipy-doctest
spin smoke-docs
- run:
name: smoke-tutorials
no_output_timeout: 10m
command: |
spin smoke-tutorials
# Upload build output to scipy/devdocs repository, using SSH deploy keys.
# The keys are only available for builds on main branch.
# The keys are only available for builds on master branch.
# https://developer.github.com/guides/managing-deploy-keys/
# https://circleci.com/docs/2.0/configuration-reference/#add_ssh_keys
deploy:
<<: *defaults
docker:
- image: circleci/python:3.6.1
working_directory: ~/repo
steps:
- attach_workspace:
at: /tmp/build
- add_ssh_keys:
fingerprints:
- "1d:47:cf:2e:ea:7c:15:cf:ec:bb:1f:44:e2:56:16:d3"
- "08:18:07:68:71:e3:f9:5f:bd:95:f0:6a:df:a9:47:a2"
- run:
name: upload
@ -222,7 +82,7 @@ jobs:
(git checkout --orphan tmp && git branch -D gh-pages || true);
git checkout --orphan gh-pages;
git reset --hard;
cp -R /tmp/build/html/. .;
cp -R /tmp/build/html-scipyorg/. .;
touch .nojekyll;
git config --global user.email "scipy-circleci-bot@nomail";
git config --global user.name "scipy-circleci-bot";
@ -231,23 +91,65 @@ jobs:
git commit -m "Docs build of $CIRCLE_SHA1";
git push --set-upstream origin gh-pages --force
# Run test suite on pypy3
pypy3:
docker:
- image: pypy:3-6.0.0
steps:
- restore_cache:
keys:
- pypy3-ccache-{{ .Branch }}
- pypy3-ccache
- checkout
- run:
name: setup
command: |
apt-get -yq update
apt-get -yq install libatlas-dev libatlas-base-dev liblapack-dev gfortran ccache
ccache -M 512M
export CCACHE_COMPRESS=1
export NPY_NUM_BUILD_JOBS=`pypy3 -c 'import multiprocessing as mp; print(mp.cpu_count())'`
export PATH=/usr/lib/ccache:$PATH
# XXX: use "numpy>=1.15.0" when it's released
pypy3 -mpip install --upgrade pip setuptools wheel
pypy3 -mpip install --no-build-isolation --extra-index https://antocuni.github.io/pypy-wheels/ubuntu pytest pytest-xdist Tempita "Cython>=0.28.2" mpmath
pypy3 -mpip install --no-build-isolation git+https://github.com/numpy/numpy.git@db552b5b6b37f2ff085b304751d7a2ebed26adc9
- run:
name: build
command: |
export CCACHE_COMPRESS=1
export PATH=/usr/lib/ccache:$PATH
# Limit parallelism for Cythonization to 4 processes, to
# avoid exceeding CircleCI memory limits
export SCIPY_NUM_CYTHONIZE_JOBS=4
export NPY_NUM_BUILD_JOBS=`pypy3 -c 'import multiprocessing as mp; print(mp.cpu_count())'`
# Less aggressive optimization flags for faster compilation
OPT="-O1" FOPT="-O1" pypy3 setup.py build
- save_cache:
key: pypy3-ccache-{{ .Branch }}-{{ .BuildNum }}
paths:
- ~/.ccache
- ~/.cache/pip
- run:
name: test
command: |
# CircleCI has 4G memory limit, play it safe
export SCIPY_AVAILABLE_MEM=1G
pypy3 runtests.py -- -rfEX -n 3 --durations=30
workflows:
version: 2
default:
jobs:
- build_scipy
- build_docs:
requires:
- build_scipy
- run_benchmarks:
requires:
- build_scipy
- refguide_check:
requires:
- build_scipy
- build_docs
- deploy:
requires:
- build_docs
filters:
branches:
only: main
only: master
- pypy3

View File

@ -1,6 +1,3 @@
[run]
branch = True
include = */scipy/*
omit =
scipy/signal/_max_len_seq_inner.py
disable_warnings = include-ignored

View File

@ -1,20 +0,0 @@
{
// More info about Features: https://containers.dev/features
"hostRequirements": {
"memory": "8gb" // Added this because compilation fails on scipy.sparse if we have less than 8gb of RAM
},
"image": "mcr.microsoft.com/devcontainers/universal:2",
"features": {},
"onCreateCommand": ".devcontainer/setup.sh",
"postCreateCommand": "",
"customizations": {
"vscode": {
"extensions": [
"ms-python.python"
],
"settings": {}
}
}
}

View File

@ -1,17 +0,0 @@
#!/bin/bash
set -e
"${SHELL}" <(curl -Ls micro.mamba.pm/install.sh) < /dev/null
conda init --all
micromamba shell init -s bash
micromamba env create -f environment.yml --yes
# Note that `micromamba activate scipy-dev` doesn't work, it must be run by the
# user (same applies to `conda activate`)
git submodule update --init
# Enables users to activate environment without having to specify the full path
echo "envs_dirs:
- /home/codespace/micromamba/envs" > /opt/conda/.condarc

View File

@ -1,29 +0,0 @@
root = true
[*.{c,cxx,h,hpp}]
# https://numpy.org/neps/nep-0045-c_style_guide.html
indent_size = 4
indent_style = space
max_line_length = 88
trim_trailing_whitespace = true
[*.{md,py,pyi,pxd}]
# https://peps.python.org/pep-0008/
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.py]
# Keep in sync with `tools/lint.toml`
# https://docs.astral.sh/ruff/settings/#line-length
max_line_length = 88
[*.pyi]
# https://typing.readthedocs.io/en/latest/guides/writing_stubs.html#style-guide
max_line_length = 130
[*.md]
max_line_length = 80

View File

@ -1,40 +0,0 @@
# Adopt ruff linter and "lint everything" (gh-17878)
6c4a3f3551f9ee52a75c8c3999a57bed8ea67deb
# Whitespace clean up in stats (gh-17892)
076710d528273563048c2b05bb3ad2f8b03fdb95
# Cython lint
c1c91d3933291046c72a8e1569c0c4703a51ba45
# Release entries move
b9cb3d075dad2b1ee9c145f9e69192d6eeda4f1e
# Rename `fft/tests/test_numpy.py` to `test_basic.py`
79099da5aefd2f76092b999823e45a6519e41005
# Line length clean up in misc (gh-19491)
8870c5d01a6f91a737db4642343ef997a63c8584
# Line length clean up in fftpack (gh-19503)
39787f146137b0ec9d31907989c6f104ba7eed76
# Line length clean up in `scipyoptdoc.py` (gh-19505)
49858c5fb2bf479b17a0bf42a7397d73f45f1b31
# Line length clean up in odr (gh-19514)
877f1cc34ad2147340684bcadfbf884d47f5a954
# Line length clean up in fft (gh-19520)
9d41d85c472438cf17211b843de7c770868d25a4
# Blanket noqa clean up (gh-19529)
664a42c9a7945a0a47f876baa6d27fb4c31c6847
# Unused noqa clean up (gh-19529)
05b872da4f498d923dbf5f5f38691b9a21580914
# Clean up for UP lint rules (gh-19516)
9cf72e4599e0a22902ef3bc1a42e645240b1734d
# Clean up for B028 lint rule (gh-19623)
81662226aac5c6b978825b381d2793b16d3b354f
# Clean up for enabling line length check (gh-19609)
fa9f13e6906e7d00510d593f7f982db30e4e4f14
# Using clang-format with `special` C++ files (gh-19613)
ecef3490da68a0c53ba543c618bab0c8e15dccee
# Change to indent width of 4 in clang-format (gh-19660)
852776a3fe0f3d08c0bed9174f6ac33f653a8677
# Style cleanup in `pyproject.toml`
7b921fd28659b02544bfb46368ddadd1048b37aa
# Style cleanup to always `import numpy as np`
ceafa8e730887b81cf10d483ce375559ebd1de09
# Clean-up for UP031, UP032 lint rules (gh-21029)
d1b5af016e907e037136b7a38e485437165490f2

70
.gitattributes vendored
View File

@ -1,36 +1,60 @@
# Excluding files from an sdist generated by meson-python
#########################################################
# Note: when adding to this list, be aware that you need to commit your changes
# before they take effect (can be confusing during testing)
#
# Note: git submodules cannot be handled in this file. For removing
# files and directories from git submodules, see `tools/trim_sdist_content.py`
# This is a Meson "dist script", run during sdist generation only.
.circleci/* export-ignore
.github/* export-ignore
ci/* export-ignore
.coveragerc export-ignore
.git* export-ignore
*.yml export-ignore
*.yaml export-ignore
.mailmap export-ignore
# Dealing with line endings
###########################
* text=auto
tools/win32/build_scripts/nsis_scripts/*.nsi.in eol=crlf
# Don't want to convert line endings of this file, causes test failure on
# Windows
scipy/io/matlab/tests/data/japanese_utf8.txt binary
# Autogenerated files
scipy/special/_ufuncs_cxx.pyx binary
scipy/special/_ufuncs_cxx.pxd binary
scipy/special/_ufuncs.pyx binary
scipy/special/_ufuncs_defs.h binary
scipy/special/cython_special.pxd binary
scipy/special/cython_special.pyx binary
scipy/linalg/_blas_subroutine_wrappers.f binary
scipy/linalg/_blas_subroutines.h binary
scipy/linalg/_lapack_subroutine_wrappers.f binary
scipy/linalg/_lapack_subroutines.h binary
scipy/linalg/cython_blas.pxd binary
scipy/linalg/cython_blas.pyx binary
scipy/linalg/cython_blas_signatures.txt binary
scipy/linalg/cython_lapack.pxd binary
scipy/linalg/cython_lapack.pyx binary
scipy/linalg/cython_lapack_signatures.txt binary
# SWIG-generated files
scipy/sparse/sparsetools/bsr.py binary
scipy/sparse/sparsetools/bsr_wrap.cxx binary
scipy/sparse/sparsetools/coo.py binary
scipy/sparse/sparsetools/coo_wrap.cxx binary
scipy/sparse/sparsetools/csc.py binary
scipy/sparse/sparsetools/csc_wrap.cxx binary
scipy/sparse/sparsetools/csr.py binary
scipy/sparse/sparsetools/csr_wrap.cxx binary
scipy/sparse/sparsetools/dia.py binary
scipy/sparse/sparsetools/dia_wrap.cxx binary
scipy/sparse/sparsetools/csgraph.py binary
scipy/sparse/sparsetools/csgraph_wrap.cxx binary
# Numerical data files
scipy/special/tests/data/*.txt binary
scipy/special/tests/data/*/*.txt binary
scipy/special/tests/data/*/*/*.txt binary
# SWIG-generated files
scipy/sparse/sparsetools/bsr.py binary
scipy/sparse/sparsetools/bsr_wrap.cxx binary
scipy/sparse/sparsetools/coo.py binary
scipy/sparse/sparsetools/coo_wrap.cxx binary
scipy/sparse/sparsetools/csc.py binary
scipy/sparse/sparsetools/csc_wrap.cxx binary
scipy/sparse/sparsetools/csr.py binary
scipy/sparse/sparsetools/csr_wrap.cxx binary
scipy/sparse/sparsetools/dia.py binary
scipy/sparse/sparsetools/dia_wrap.cxx binary
scipy/sparse/sparsetools/csgraph.py binary
scipy/sparse/sparsetools/csgraph_wrap.cxx binary
# Release notes, reduce number of conflicts.
doc/release/*.rst merge=union

78
.github/CODEOWNERS vendored
View File

@ -1,78 +0,0 @@
# The purpose of this file is to trigger review requests when PRs touch
# particular files. Those reviews are not mandatory, however it's often useful
# to have an expert pinged who is interested in only one part of SciPy and
# doesn't follow general development.
#
# Note that only GitHub handles (whether individuals or teams) with commit
# rights should be added to this file.
# See https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
# for more details about how CODEOWNERS works.
# Each line is a file pattern followed by one or more owners.
.github/CODEOWNERS @rgommers @larsoner @lucascolley
# Build related files
pyproject.toml @rgommers
tools/generate_requirements.py @rgommers
requirements/ @rgommers
environment.yml @rgommers
scipy/_build_utils/ @rgommers
# Dev CLI
.spin @rgommers
# CI config
.circleci/ @larsoner
.github/workflows/ @larsoner @andyfaff
# Developer workspace
*pixi* @lucascolley
# Linting
lint* @lucascolley
# Array API
*array_api* @lucascolley
# SciPy submodules (please keep in alphabetical order)
scipy/fft/ @peterbell10
scipy/fftpack/ @peterbell10
scipy/linalg/ @larsoner @ilayn
scipy/integrate/ @steppi
scipy/interpolate/ @ev-br
scipy/odr/ @rkern
scipy/optimize/ @andyfaff
scipy/signal/ @larsoner @ilayn @DietBru
scipy/sparse/ @perimosocordiae @dschult
scipy/sparse/csgraph/
scipy/sparse/linalg/
scipy/spatial/ @tylerjereddy @peterbell10
scipy/special/ @steppi
scipy/stats/_distn_infrastructure/ @andyfaff @ev-br
scipy/stats/*distr*.py @ev-br
scipy/stats/_continuous_distns/ @andyfaff
scipy/stats/_covariance.py @mdhaber
scipy/stats/_hypothesis.py @tupui
scipy/stats/*qmc.* @tupui
scipy/stats/_multicomp.py @tupui @mdhaber
scipy/stats/_resampling.py @mdhaber
scipy/stats/*sobol* @tupui
scipy/stats/_sensitivity_analysis.py @tupui
scipy/stats/_survival.py @tupui @mdhaber
scipy/stats/.unuran/ @tirthasheshpatel
# Testing infrastructure
tools/refguide_check.py @ev-br
tools/ @larsoner @rgommers
pytest.ini @larsoner
.coveragerc @larsoner
benchmarks/asv.conf.json @larsoner
# Doc
requirements/doc.txt @tupui
doc/source/conf.py @tupui
doc/source/_static/ @tupui
# Meson
meson* @rgommers

View File

@ -1,7 +1,7 @@
SciPy Code of Conduct
======
You can read our Code of Conduct by following [this link](../main/doc/source/dev/conduct/code_of_conduct.rst).
You can read our Code of Conduct by following [this link](../doc/source/dev/conduct/code_of_conduct.rst).
Alternatively, you can find it under `scipy/doc/source/dev/conduct/code_of_conduct.rst`.

3
.github/FUNDING.yml vendored
View File

@ -1,3 +0,0 @@
github: [numfocus]
tidelift: pypi/scipy
custom: https://scipy.org/about/#donate

20
.github/ISSUE_TEMPLATE.md vendored Normal file
View File

@ -0,0 +1,20 @@
<<Please describe the issue in detail here, and for bug reports fill in the fields below.>>
### Reproducing code example:
```
<<A short code example that reproduces the problem. It should be self-contained, i.e., possible to run as-is via 'python myproblem.py'>>
```
### Error message:
```
<<Full error message, if any (starting from line Traceback: ...)>>
```
### Scipy/Numpy/Python version information:
```
<<Output from 'import sys, scipy, numpy; print(scipy.__version__, numpy.__version__, sys.version_info)'>>
```

View File

@ -1,38 +0,0 @@
name: Bug Report
description: Create a report to help us improve SciPy
title: "BUG: <Please write a comprehensive title after the 'BUG: ' prefix>"
labels: [defect]
body:
- type: markdown
attributes:
value: Thank you for taking the time to file a bug report.
- type: markdown
attributes:
value: Before continuing, please check the issue tracker for existing issues and the [development documentation](https://scipy.github.io/devdocs/index.html). Your issue could be already fixed on the development version. If you have an installation problem, make sure that you are following the [latest installation recommendations](https://scipy.github.io/devdocs/dev/contributor/contributor_toc.html#development-environment).
- type: textarea
attributes:
label: Describe your issue.
validations:
required: true
- type: textarea
attributes:
label: Reproducing Code Example
description: 'Paste the Reproducing code example and it will be rendered as a code block.'
render: python
validations:
required: true
- type: textarea
attributes:
label: Error message
description: 'If any, paste the *full* error message as above (starting from line Traceback) and it will be rendered as a code block'
render: shell
validations:
required: true
- type: textarea
attributes:
label: SciPy/NumPy/Python version and system information
description: 'Please run the following and paste the result here - `import sys, scipy, numpy; print(scipy.__version__, numpy.__version__, sys.version_info); scipy.show_config()`'
render: shell
validations:
required: true

View File

@ -1,26 +0,0 @@
name: Documentation
description: Report an issue related to the SciPy documentation.
title: "DOC: <Please write a comprehensive title after the 'DOC: ' prefix>"
labels: [Documentation]
body:
- type: textarea
attributes:
label: "Issue with current documentation:"
description: >
Please check the development version of the documentation at
https://scipy.github.io/devdocs/ to see if this issue has already been
addressed. If not, please link to the development version (either using
the link above or selecting the dev version in the documentation version
dropdown) of the document/code you're referring to.
- type: textarea
attributes:
label: "Idea or request for content:"
description: >
Please describe as clearly as possible a suggested fix, better phrasing or
topics you think are missing from the current documentation.
- type: textarea
attributes:
label: Additional context (e.g. screenshots, GIFs)

View File

@ -1,22 +0,0 @@
name: Feature Request
description: Suggest an idea for SciPy
title: "ENH: "
labels: [enhancement]
body:
- type: textarea
attributes:
label: Is your feature request related to a problem? Please describe.
validations:
required: false
- type: textarea
attributes:
label: Describe the solution you'd like.
validations:
required: false
- type: textarea
attributes:
label: Describe alternatives you've considered.
- type: textarea
attributes:
label: Additional context (e.g. screenshots, GIFs)

View File

@ -1,11 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: Stack Overflow
url: https://stackoverflow.com/questions/tagged/scipy
about: Please ask and answer usage questions on Stack Overflow
- name: Developer Forum
url: https://discuss.scientific-python.org/c/contributor/scipy
about: Development discussions and announcements on the forum
- name: Blank issue
url: https://github.com/scipy/scipy/issues/new
about: Please note that other templates should be used in most cases

View File

@ -1,28 +0,0 @@
<!--
Thanks for contributing a pull request! Please ensure that
your PR satisfies the checklist before submitting:
https://scipy.github.io/devdocs/dev/contributor/development_workflow.html#checklist-before-submitting-a-pr
Also, please name and describe your PR as you would write a
commit message:
https://scipy.github.io/devdocs/dev/contributor/development_workflow.html#writing-the-commit-message.
However, please only include an issue number in the description, not the title,
and please ensure that any code names containing underscores are enclosed in backticks.
Depending on your changes, you can skip CI operations and save time and energy:
https://scipy.github.io/devdocs/dev/contributor/continuous_integration.html#skipping
Note that we are a team of volunteers; we appreciate your
patience during the review process.
Again, thanks for contributing!
-->
#### Reference issue
<!--Example: Closes gh-WXYZ.-->
#### What does this implement/fix?
<!--Please explain your changes.-->
#### Additional information
<!--Any additional information you think is important.-->

View File

@ -1,24 +0,0 @@
name: Setup Compiler Cache
description: Prepare and restore compiler cache using ccache
inputs:
workflow_name:
description: "Unique name of the calling workflow to use as cache key prefix"
required: true
runs:
using: "composite"
steps:
- name: Prepare compiler cache
id: prep-ccache
shell: bash
run: |
mkdir -p "${CCACHE_DIR:-$HOME/.ccache}"
echo "dir=${CCACHE_DIR:-$HOME/.ccache}" >> $GITHUB_OUTPUT
NOW=$(date -u +"%F-%T")
echo "timestamp=${NOW}" >> $GITHUB_OUTPUT
- name: Setup compiler cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684
with:
path: ${{ steps.prep-ccache.outputs.dir }}
key: ${{ inputs.workflow_name }}-ccache-linux-${{ steps.prep-ccache.outputs.timestamp }}
restore-keys: ${{ inputs.workflow_name }}-ccache-linux-

View File

@ -1,174 +0,0 @@
# This file contains globs for automatically adding labels based on changed files,
# for use with https://github.com/actions/labeler.
scipy.cluster:
- changed-files:
- any-glob-to-any-file:
- scipy/cluster/**
scipy.constants:
- changed-files:
- any-glob-to-any-file:
- scipy/constants/**
scipy.differentiate:
- changed-files:
- any-glob-to-any-file:
- scipy/differentiate/**
scipy.fft:
- changed-files:
- any-glob-to-any-file:
- scipy/fft/**
scipy.fftpack:
- changed-files:
- any-glob-to-any-file:
- scipy/fftpack/**
scipy.integrate:
- changed-files:
- any-glob-to-any-file:
- scipy/integrate/**
scipy.interpolate:
- changed-files:
- any-glob-to-any-file:
- scipy/interpolate/**
scipy.io:
- changed-files:
- any-glob-to-any-file:
- scipy/io/**
scipy._lib:
- changed-files:
- any-glob-to-any-file:
- scipy/_lib/**
scipy.linalg:
- changed-files:
- any-glob-to-any-file:
- scipy/linalg/**
scipy.ndimage:
- changed-files:
- any-glob-to-any-file:
- scipy/ndimage/**
scipy.odr:
- changed-files:
- any-glob-to-any-file:
- scipy/odr/**
scipy.optimize:
- changed-files:
- any-glob-to-any-file:
- scipy/optimize/**
scipy.signal:
- changed-files:
- any-glob-to-any-file:
- scipy/signal/**
scipy.sparse:
- changed-files:
- all-globs-to-any-file:
- scipy/sparse/**
# don't match the `csgraph` or `linalg` submodules
- '!scipy/sparse/csgraph/**'
- '!scipy/sparse/linalg/**'
scipy.sparse.csgraph:
- changed-files:
- any-glob-to-any-file:
- scipy/sparse/csgraph/**
scipy.sparse.linalg:
- changed-files:
- any-glob-to-any-file:
- scipy/sparse/linalg/**
scipy.spatial:
- changed-files:
- any-glob-to-any-file:
- scipy/spatial/**
scipy.special:
- changed-files:
- any-glob-to-any-file:
- scipy/special/**
scipy.stats:
- changed-files:
- any-glob-to-any-file:
- scipy/stats/**
Cython:
- changed-files:
- any-glob-to-any-file:
- scipy/**/*.pyx*
- scipy/**/*.pxd*
- scipy/**/*.pxi*
- scipy/**/_generate_pyx.py
Fortran:
- changed-files:
- any-glob-to-any-file:
- scipy/**/*.f
- scipy/**/*.f.src
- scipy/**/*.F
- scipy/**/*.f90
- scipy/**/*.pyf
C/C++:
- changed-files:
- any-glob-to-any-file:
- scipy/**/*.c
- scipy/**/*.c.in
- scipy/**/*.c.old
- scipy/**/*.h
- scipy/**/*.h.in
- scipy/**/*.cpp
- scipy/**/*.cc
- scipy/**/*.cxx
- scipy/**/*.hpp
array types:
- changed-files:
- any-glob-to-any-file:
- scipy/_lib/_array_api.py
Meson:
- changed-files:
- any-glob-to-any-file:
- scipy/**/meson.build
- meson.build
Documentation:
- changed-files:
- any-glob-to-any-file:
- doc/**
CI:
- changed-files:
- any-glob-to-any-file:
- .circleci/**
- .github/workflows/**
- ci/**
DX:
- changed-files:
- any-glob-to-any-file:
- doc/source/dev/**
Build issues:
- changed-files:
- any-glob-to-any-file:
- scipy/_build_utils/**
uarray:
- changed-files:
- any-glob-to-any-file:
- scipy/_lib/_uarray/**
- scipy/_lib/uarray.py

38
.github/labeler.yml vendored
View File

@ -1,38 +0,0 @@
# This file contains regexes for automatically adding labels based on issue/PR titles,
# for use with https://github.com/github/issue-labeler.
array types:
- 'array types'
Benchmarks:
- 'BENCH'
Build issues:
- 'BLD'
CI:
- 'CI'
defect:
- '(BUG|FIX)'
deprecated:
- 'DEP'
Documentation:
- 'DOC'
DX:
- 'DEV'
enhancement:
- 'ENH'
maintenance:
- '(MAINT|TST)'
query:
- '(Q|q)uery'
RFC:
- 'RFC'

View File

@ -1,28 +0,0 @@
name: Build Dependencies(Win-ARM64)
description: "Common setup steps for Win-ARM64 CI"
runs:
using: "composite"
steps:
- name: Install LLVM
shell: pwsh
run: |
Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.8/LLVM-20.1.8-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe
$expectedHash = "7c4ac97eb2ae6b960ca5f9caf3ff6124c8d2a18cc07a7840a4d2ea15537bad8e"
$fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash
if ($fileHash -ne $expectedHash) {
Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with."
exit 1
}
Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait
echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install pkgconf via vcpkg
shell: pwsh
run: |
& "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
$env:VCPKG_ROOT = "C:\vcpkg"
Set-Location $env:VCPKG_ROOT
./vcpkg install pkgconf:arm64-windows
$pkgconfPath = "$env:VCPKG_ROOT\installed\arm64-windows\tools\pkgconf"
Copy-Item "$pkgconfPath\pkgconf.exe" "$pkgconfPath\pkg-config.exe" -Force
echo "$pkgconfPath" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append

View File

@ -1,67 +0,0 @@
name: Array API
on:
push:
branches:
- maintenance/**
pull_request:
branches:
- main
- maintenance/**
permissions:
contents: read # to fetch code (actions/checkout)
env:
CCACHE_DIR: "${{ github.workspace }}/.ccache"
INSTALLDIR: "build-install"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
get_commit_message:
name: Get commit message
uses: ./.github/workflows/commit_message.yml
xp_cpu:
name: ${{ join(matrix.tasks, ' ') }}
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
include:
- tasks: [test-strict, test-torch, test-torch-float32, test-dask]
environments: [array-api-strict, torch-cpu, dask-cpu]
- tasks: [test-jax]
environments: [jax-cpu]
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
with:
pixi-version: v0.60.0
cache: false
environments: build-cpu ${{ join(matrix.environments, ' ') }}
- name: Set up ccache
uses: ./.github/ccache
with:
workflow_name: ${{ github.workflow }}
- name: Build
run: pixi run build-cpu
- name: Test
run: |
export OMP_NUM_THREADS=2
for task in ${{ join(matrix.tasks, ' ') }}; do
pixi run --skip-deps "$task" -- --durations 3 --timeout=60
done

View File

@ -1,24 +0,0 @@
name: Redirect circleci artifacts
on: [status]
permissions:
contents: read # to fetch code (actions/checkout)
statuses: write # to report circleci status (scientific-python/circleci-artifacts-redirector-action)
jobs:
circleci_artifacts_redirector_job:
runs-on: ubuntu-22.04
if: >
github.repository == 'scipy/scipy'
&& github.event.context == 'ci/circleci: build_docs'
name: Run CircleCI artifacts redirector
steps:
- name: GitHub Action step
uses: scientific-python/circleci-artifacts-redirector-action@4e13a10d89177f4bfc8007a7064bdbeda848d8d1 # v1.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
api-token: ${{ secrets.CIRCLE_TOKEN }}
artifact-path: 0/html/index.html
circleci-jobs: build_docs
job-title: Check the rendered docs here!

View File

@ -1,102 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"
on:
  push:
    # NOTE: the "maintenance/*" glob already matches every
    # "maintenance/<version>.x" branch, so the previous explicit per-branch
    # entries were redundant; the two patterns below are equivalent coverage.
    branches: [ "main", "maintenance/*" ]
  pull_request:
    branches: [ "main", "maintenance/*" ]
  schedule:
    - cron: '33 21 * * 3'
jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write
      # required to fetch internal or private CodeQL packs
      packages: read
      # only required for workflows in private repositories
      actions: read
      contents: read
    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: c-cpp
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      # Add any setup steps before running the `github/codeql-action/init` action.
      # This includes steps like installing compilers or runtimes (`actions/setup-node`
      # or others). This is typically only required for manual builds.
      # - name: Setup runtime (example)
      #   uses: actions/setup-example@v1
      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality
      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"

View File

@ -1,44 +0,0 @@
# Reusable workflow: exposes a "message" output (1 = run CI, 0 = skip)
# based on [lint only] / [docs only] tags in the latest commit message.
name: Skip tag checker
on:
workflow_call:
outputs:
message:
description: "Skip tag checker"
value: ${{ jobs.check_skip_tags.outputs.message }}
permissions:
contents: read
jobs:
check_skip_tags:
name: Check for skips
runs-on: ubuntu-latest
outputs:
message: ${{ steps.skip_check.outputs.message }}
steps:
- name: Checkout scipy
if: ${{ !contains(github.actor, 'nektos/act') }}
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# Gets the correct commit message for pull request
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Check for skips
id: skip_check
# the lint workflow is not currently skipped with [docs only].
# this could be changed by adding a new job which uses this workflow to lint.yml
# and changing the output here based on the combination of tags (if desired).
# NOTE(review): `git log --no-merges -1` emits the full commit record
# (hash/author/date + message), so the substring match below would also
# trigger on a tag appearing outside the message body — presumably
# acceptable, but worth confirming.
run: |
set -xe
RUN="1"
# For running a job locally with Act (https://github.com/nektos/act),
# always run the job rather than skip based on commit message
if [[ $ACT != true ]]; then
COMMIT_MSG=$(git log --no-merges -1)
if [[ "$COMMIT_MSG" == *"[lint only]"* || "$COMMIT_MSG" == *"[docs only]"* ]]; then
RUN="0"
fi
fi
echo "message=$RUN" >> $GITHUB_OUTPUT
echo github.ref $GITHUB_REF

View File

@ -1,115 +0,0 @@
# Builds SciPy with CUDA support and runs the PyTorch/JAX/CuPy GPU test
# suites on a GPU-equipped Cirrus runner, with pixi- and ccache-based caching.
name: GPU jobs
# Updating the Pixi lock file:
#
# 1. Please try to avoid this unless it's actually necessary. Updating the lock
# file generates a large diff, and triggers a lot of large package downloads
# (also for local testing by maintainers).
# 2. Consider committing the `pixi.toml` change in one commit, and the
# `pixi.lock` change in a second commit. This allows the reviewer to redo
# the lock file changes locally, or discard them in case of conflicts while
# still keeping the `pixi.toml` changes.
#
#
# In order to run a CI job step locally on a Linux machine with an NVIDIA GPU:
#
# $ cd .github/workflows
# $ pixi run build
# $ pixi run -e cupy test-cupy -v -m "array_api_backends and not slow"
#
# Or run different test commands in the same environments, e.g.:
#
# $ pixi run test-torch-cuda -s special
# $ pixi run test-torch -s special
#
# To see available tasks per environment:
#
# $ pixi task ls -s
on:
push:
branches:
- maintenance/**
pull_request:
branches:
- main
- maintenance/**
permissions:
contents: read # to fetch code (actions/checkout)
env:
CCACHE_DIR: "${{ github.workspace }}/.ccache"
PIXI_CACHE_DIR: "${{ github.workspace }}/.cache/rattler"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
get_commit_message:
name: Get commit message
uses: ./.github/workflows/commit_message.yml
pytorch_gpu:
name: PyTorch, JAX, CuPy GPU
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ghcr.io/cirruslabs/ubuntu-runner-amd64-gpu:24.04
steps:
- name: Checkout scipy repo
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
submodules: recursive
- name: Cache pixi
uses: cirruslabs/cache@v4 #caa3ad0624c6c2acd8ba50ad452d1f44bba078bb # v4
with:
path: ${{ env.PIXI_CACHE_DIR }}
# Cache hit if lock file did not change. If it did, still restore the cache,
# since most packages will still be the same - the cache save will
# then happen at the end (in case the lock file didn't change,
# nothing is saved at the end of a job).
key: ${{ runner.os }}-gpu-pixi-${{ hashFiles('.github/workflows/pixi.lock') }}
restore-keys: |
${{ runner.os }}-gpu-pixi
- name: Setup compiler cache
uses: cirruslabs/cache@v4 #caa3ad0624c6c2acd8ba50ad452d1f44bba078bb # v4
with:
path: ${{ env.CCACHE_DIR }}
# Make primary key unique by using `run_id`, this ensures the cache
# is always saved at the end.
key: ${{ runner.os }}-gpu-ccache-${{ github.run_id }}
restore-keys: |
${{ runner.os }}-gpu-ccache
# Sanity checks that the runner actually exposes a GPU before building.
- name: run nvidia-smi
run: nvidia-smi
- name: run nvidia-smi --query
run: nvidia-smi --query
# NOTE(review): pixi-version here (v0.55.0) differs from the v0.60.0 pinned
# in the array-API CPU workflow — presumably intentional, but confirm the
# pins should not be kept in sync.
- uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
with:
pixi-version: v0.55.0
cache: false
environments: >-
build-cuda
torch-cuda
jax-cuda
cupy
- name: Build SciPy
run: pixi run build-cuda
- name: Run PyTorch GPU tests
run: pixi run --skip-deps test-torch-cuda -v
- name: Run JAX GPU tests
run: pixi run --skip-deps test-jax-cuda -v
- name: Run CuPy tests
run: pixi run --skip-deps test-cupy -v

View File

@ -1,23 +0,0 @@
# Auto-labels newly opened issues by matching their title against the regex
# rules in .github/labeler.yml.
name: "Issue Labeler"
on:
issues:
types: [opened]
jobs:
label_issue:
# Permissions needed for labelling issues automatically
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication
permissions:
contents: read
issues: write
runs-on: ubuntu-latest
steps:
# label based on issue title
- uses: github/issue-labeler@c1b0f9f52a63158c4adc09425e858e87b32e9685 # v3.4
if: github.repository == 'scipy/scipy'
with:
configuration-path: .github/labeler.yml
# Match titles only (include-title: 1), not issue bodies.
include-title: 1
include-body: 0
enable-versioned-regex: 0
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,52 +0,0 @@
# Runs the linter (via pixi/spin) plus structural checks: Python.h ordering,
# module interdependencies, and external-dependency rules (tach).
name: Lint
on:
push:
branches:
- maintenance/**
pull_request:
branches:
- main
- maintenance/**
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test_lint:
name: Lint
# If using act to run CI locally the github object does not exist and the usual skipping should not be enforced
if: "github.repository == 'scipy/scipy' || github.repository == ''"
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0 # previous commits used in tools/lint.py
submodules: recursive
- uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
with:
pixi-version: v0.55.0
cache: false
environments: lint
# NOTE(review): $GITHUB_BASE_REF is only set on pull_request events; on the
# push trigger above it is empty, so --diff-against gets a bare "origin/" —
# verify lint.py tolerates that.
- name: Spin Lint
run: |
set -euo pipefail
pixi run lint --diff-against origin/$GITHUB_BASE_REF
- name: Check that Python.h is first in any file including it.
shell: bash
run: pixi run check-python-h
- name: Check module interdependencies
working-directory: .github/workflows
run: pixi run tach-check
- name: Check external dependencies
working-directory: .github/workflows
run: pixi run tach-check-external

View File

@ -1,597 +0,0 @@
# Main Linux CI matrix for SciPy: build-flavor and interpreter-variant jobs
# (defined below) all gated on the commit-message skip-tag check.
name: Linux tests
on:
push:
branches:
- maintenance/**
pull_request:
branches:
- main
- maintenance/**
permissions:
contents: read # to fetch code (actions/checkout)
env:
CCACHE_DIR: "${{ github.workspace }}/.ccache"
INSTALLDIR: "build-install"
concurrency:
# Cancel superseded runs for the same branch/PR.
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
# Reusable workflow producing the run/skip flag consumed by every job's `if:`.
get_commit_message:
name: Get commit message
uses: ./.github/workflows/commit_message.yml
# Primary dev job: spin/meson build on py3.12 (plus mypy) and on py3.14-dev
# with from-source NumPy/Pythran; also validates install-tag handling.
test_meson:
name: mypy (py3.12) & dev deps (py3.14), fast, spin
needs: get_commit_message
# If using act to run CI locally the github object does not exist and
# the usual skipping should not be enforced
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-22.04
strategy:
matrix:
python-version: ['3.12', '3.14-dev'] # this run will use python dev versions when available
maintenance-branch:
- ${{ contains(github.ref, 'maintenance/') || contains(github.base_ref, 'maintenance/') }}
exclude:
# Skip the dev-Python leg on maintenance branches.
- maintenance-branch: true
python-version: '3.14-dev'
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Setup Python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
- name: Install Ubuntu dependencies
run: |
# NOTE: not the same OpenBLAS version as in upstream CI (I'm being lazy here)
sudo apt-get update
sudo apt-get install -y libopenblas-dev libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache libmpc-dev
- name: Install Python packages
if: matrix.python-version == '3.12'
run: |
python -m pip install numpy cython pytest pytest-xdist pytest-timeout pybind11 mpmath gmpy2 pythran ninja meson pooch hypothesis spin "click<8.3.0"
- name: Install Python packages from repositories
if: matrix.python-version == '3.14-dev' # this run will use python dev versions when available
run: |
python -m pip install git+https://github.com/numpy/numpy.git
python -m pip install ninja cython pytest pybind11 pytest-xdist pytest-timeout spin pooch hypothesis "setuptools<67.3" meson "click<8.3.0"
python -m pip install git+https://github.com/serge-sans-paille/pythran.git
# Disable Meson master testing until upstream option handling is fixed, see scipy#22534
# python -m pip install git+https://github.com/mesonbuild/meson.git
- name: Prepare compiler cache
id: prep-ccache
shell: bash
run: |
mkdir -p "${CCACHE_DIR}"
echo "dir=$CCACHE_DIR" >> $GITHUB_OUTPUT
NOW=$(date -u +"%F-%T")
echo "timestamp=${NOW}" >> $GITHUB_OUTPUT
- name: Setup compiler cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
id: cache-ccache
# Reference: https://docs.github.com/en/actions/guides/caching-dependencies-to-speed-up-workflows#matching-a-cache-key
# NOTE: The caching strategy is modeled in a way that it will always have a unique cache key for each workflow run
# (even if the same workflow is run multiple times). The restore keys are not unique and for a partial match, they will
# return the most recently created cache entry, according to the GitHub Action Docs.
with:
path: ${{ steps.prep-ccache.outputs.dir }}
# Restores ccache from either a previous build on this branch or on main
key: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-linux-${{ steps.prep-ccache.outputs.timestamp }}
# This evaluates to `Linux Tests-3.12-ccache-linux-` which is not unique. As the CI matrix is expanded, this will
# need to be updated to be unique so that the cache is not restored from a different job altogether.
restore-keys: |
${{ github.workflow }}-${{ matrix.python-version }}-ccache-linux-
- name: Setup build and install scipy
run: |
spin build --release
- name: Ccache performance
shell: bash -l {0}
run: ccache -s
- name: Check installation
run: |
pushd tools
python check_installation.py ${{ env.INSTALLDIR }}
./check_pyext_symbol_hiding.sh ../build
popd
# Rebuild twice with different --tags sets to verify install-tag filtering.
- name: Check usage of install tags
run: |
rm -r ${{ env.INSTALLDIR }}
spin build --tags=runtime,python-runtime,devel
python tools/check_installation.py ${{ env.INSTALLDIR }} --no-tests
rm -r ${{ env.INSTALLDIR }}
spin build --tags=runtime,python-runtime,devel,tests
python tools/check_installation.py ${{ env.INSTALLDIR }}
- name: Check build-internal dependencies
run: ninja -C build -t missingdeps
- name: Mypy
if: matrix.python-version == '3.12'
run: |
# Packages that are only needed for their annotations
python -m pip install mypy==1.10.0 types-psutil typing_extensions
python -m pip install pybind11 sphinx
spin mypy
- name: Test SciPy
run: |
export OMP_NUM_THREADS=2
spin test -j3 -- --durations 10 --timeout=60
#################################################################################
# Smoke-tests pip installation into virtualenvs (isolated and non-isolated
# builds), including a venv inside the source tree (regression for gh-16312).
test_venv_install:
name: Install into venv, cluster only, pyAny/npAny, pip+cluster.test()
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Install Ubuntu dependencies
run: |
# We're not running the full test suite here, only testing the install
# into a venv is working, so leave out optional dependencies. That's
# also why we can get away with an old version of OpenBLAS from Ubuntu
sudo apt-get update
sudo apt-get install -y python3-dev libopenblas-dev pkg-config gfortran
- name: Create venv, install SciPy
run: |
python -m venv ../venvs/scipy-venv
source ../venvs/scipy-venv/bin/activate
# Note that this uses build isolation. That's why we don't need build
# dependencies to be installed in the venv itself.
python -m pip install . -vv -Csetup-args=--werror
- name: Basic imports and tests
run: |
source ../venvs/scipy-venv/bin/activate
cd ..
python -c "import scipy"
python -c "import scipy.linalg"
python -m pip install pytest hypothesis
python -c "from scipy import cluster; cluster.test()"
- name: Create venv inside source tree
# This is a regression test for gh-16312
run: |
python -m venv .venv
source .venv/bin/activate
# Install build dependencies. Use meson-python from its main branch,
# most convenient to test in this job because we're using pip without
# build isolation here.
python -m pip install numpy pybind11 pythran cython pytest ninja hypothesis
python -m pip install git+https://github.com/mesonbuild/meson-python.git
# Non-isolated build, so we use dependencies installed inside the source tree
python -m pip install -U pip # need pip >=23 for `--config-settings`
python -m pip install . -v --no-build-isolation
# Basic tests
cd ..
python -c "import scipy"
python -c "import scipy.linalg"
python -c "from scipy import cluster; cluster.test()"
#################################################################################
# Builds and fast-tests against a CPython debug build with ATLAS BLAS/LAPACK,
# going through the sdist->wheel route.
python_debug:
# also uses the vcs->sdist->wheel route.
name: Python-debug & ATLAS & sdist+wheel, fast, py3.12/npMin, pip+pytest
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-24.04 # provides python3.12-dbg
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Configuring Test Environment
run: |
sudo apt-get update
sudo apt install python3-dbg python3-dev libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libmpc-dev
python3-dbg --version # just to check
python3-dbg -c 'import sys; print("Python debug build:", hasattr(sys, "gettotalrefcount"))'
- name: Build SciPy
run: |
python3-dbg -m pip install build
python3-dbg -m build -Csetup-args=-Dbuildtype=debugoptimized -Csetup-args=-Dblas=blas-atlas -Csetup-args=-Dlapack=lapack-atlas
python3-dbg -m pip install dist/scipy*.whl
# Runs from doc/ — presumably so the in-tree scipy package does not shadow
# the installed wheel; confirm.
- name: Testing SciPy
run: |
cd doc
python3-dbg -m pip install pytest pytest-xdist pytest-timeout mpmath gmpy2 threadpoolctl pooch hypothesis
python3-dbg -m pytest --pyargs scipy -n4 --durations=10 -m "not slow"
#################################################################################
# Builds with the oldest-supported GCC toolchain (gcc-9) under PYTHONOPTIMIZE=2
# and runs the full test suite with pydata/sparse and the minimum NumPy.
gcc9:
# Purpose is to examine builds with oldest-supported gcc and test with pydata/sparse.
name: Oldest GCC & pydata/sparse, full, py3.12/npMin, pip+pytest
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Setup Python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.12"
- name: Setup system dependencies
run: |
sudo apt-get -y update
sudo apt install -y g++-9 gcc-9 gfortran-9
# (fix: liblapack-dev was previously listed twice in this invocation)
sudo apt install -y libatlas-base-dev liblapack-dev libgmp-dev \
libmpfr-dev libmpc-dev pkg-config libsuitesparse-dev
- name: Setup Python build deps
run: |
pip install build meson-python ninja pythran pybind11 cython numpy
- name: Build wheel and install
run: |
set -euo pipefail
export PYTHONOPTIMIZE=2
# specify which compilers to use using environment variables
CC=gcc-9 CXX=g++-9 FC=gfortran-9 python -m build --wheel --no-isolation -Csetup-args=-Dblas=blas-atlas -Csetup-args=-Dlapack=lapack-atlas
python -m pip install dist/scipy*.whl
- name: Install test dependencies
run: |
# Downgrade numpy to oldest supported version
pip install gmpy2 threadpoolctl mpmath pooch pytest pytest-xdist==2.5.0 pytest-timeout hypothesis sparse "numpy==2.0.0"
- name: Run tests
run: |
# can't be in source directory
pushd $RUNNER_TEMP
export PYTHONOPTIMIZE=2
python -m pytest --pyargs scipy -n4 --durations=10
popd
#################################################################################
# Tests against pre-release dependencies (nightly NumPy etc.), then downgrades
# to the minimum NumPy and runs the full suite with coverage and SCIPY_ARRAY_API.
prerelease_deps_coverage_64bit_blas:
# TODO: re-enable ILP64 build.
name: Prerelease deps & coverage report, full, py3.12/npMin & py3.13/npPre, spin, SCIPY_ARRAY_API=1
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.12']
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Setup Python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ matrix.python-version }}
- name: Install Ubuntu dependencies
run: |
sudo apt-get update
sudo apt-get install -y libgmp-dev libmpfr-dev libmpc-dev ccache gfortran lcov
- name: Install Python packages
run: |
python -m pip install cython pythran ninja meson-python pybind11 spin "click<8.3.0"
python -m pip install --pre --upgrade pytest pytest-cov pytest-xdist mpmath gmpy2 threadpoolctl pooch hypothesis matplotlib
python -m pip install -r requirements/openblas.txt
python -m pip install --pre --upgrade --timeout=60 -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy
- name: Prepare compiler cache
id: prep-ccache
shell: bash
run: |
mkdir -p "${CCACHE_DIR}"
echo "dir=$CCACHE_DIR" >> $GITHUB_OUTPUT
NOW=$(date -u +"%F-%T")
echo "timestamp=${NOW}" >> $GITHUB_OUTPUT
- name: Setup compiler cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
id: cache-ccache
with:
path: ${{ steps.prep-ccache.outputs.dir }}
# Restores ccache from either a previous build on this branch or on main
key: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-linux-prerelease-${{ steps.prep-ccache.outputs.timestamp }}
restore-keys: |
${{ github.workflow }}-${{ matrix.python-version }}-ccache-linux-prerelease-
- name: Build and install SciPy
run: |
spin build --gcov --with-scipy-openblas --release
- name: Ccache performance
shell: bash -l {0}
run: ccache -s
# Build happened against nightly NumPy; tests then run on the minimum.
- name: Downgrade NumPy to lowest supported
run: |
python -m pip install "numpy==2.0.0"
- name: Test SciPy
run: |
export OPENBLAS_NUM_THREADS=1
export SCIPY_ARRAY_API=1
spin test --no-build --coverage -j2 --mode full -- --cov --cov-report term-missing
#################################################################################
# Builds and fast-tests a 32-bit (i686) configuration inside a manylinux2014
# docker container; the whole build/test sequence runs as one quoted bash -c
# command (note the escaped \$ so expansion happens inside the container).
linux_32bit:
name: 32-bit, fast, py3.12/npMin, spin
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-latest
# I tried running directly in a container:, using the image: and options:
# entries. Unfortunately at this time options: does not seem to listen to
# --platform linux/i386.
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: build + test
run: |
set -exuo pipefail
docker pull quay.io/pypa/manylinux2014_i686
docker run -v $(pwd):/scipy --platform=linux/i386 quay.io/pypa/manylinux2014_i686 /bin/bash -c "cd /scipy && \
uname -a && \
python3.12 -m venv test && \
source test/bin/activate && \
python -m pip install spin 'click<8.3.0' meson ninja && \
python -m pip install -r requirements/openblas.txt && \
# Ensure that scipy-openblas is picked up by the numpy<1.26 build
cat > \$HOME/.numpy-site.cfg <<EOL
[openblas]
libraries = \$(python -c 'import scipy_openblas32; print(scipy_openblas32.get_library())')
library_dirs = \$(python -c 'import scipy_openblas32; print(scipy_openblas32.get_lib_dir())')
include_dirs = \$(python -c 'import scipy_openblas32; print(scipy_openblas32.get_include_dir())')
runtime_library_dirs = \$(python -c 'import scipy_openblas32; print(scipy_openblas32.get_lib_dir())')
symbol_prefix = scipy_
EOL
python -m pip install numpy==2.0.0 cython pybind11 pytest pytest-timeout pytest-xdist pytest-env mpmath pythran pooch meson hypothesis && \
python -c 'import numpy as np; np.show_config()' && \
spin build --with-scipy-openblas && \
spin test"
#################################################################################
# Builds with a deadsnakes python3.12 (i.e. not the distro default interpreter)
# to verify the build works when `python`/`python3` lack build dependencies.
distro_multiple_pythons:
# Purpose is to build for a non-default Python interpreter in a Linux distro
# For such a build config, `python`/`python3` executables may not have
# build dependencies like Cython or NumPy installed.
name: non-default Python interpreter, fast, py3.12/npMin, pip+pytest
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Setup system dependencies
run: |
sudo apt-get -y update
sudo apt install software-properties-common
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update -y
sudo apt install -y python3.12-dev ninja-build pkg-config libatlas-base-dev liblapack-dev
- name: Setup Python build deps
run: |
python3.12 -m pip install build pythran pybind11 cython numpy meson-python
- name: Build wheel and install
run: |
python3.12 -m build -wnx -Csetup-args=-Dblas=blas-atlas -Csetup-args=-Dlapack=lapack-atlas
python3.12 -m pip install dist/*.whl
- name: Install test dependencies
run: |
python3.12 -m pip install pytest hypothesis
- name: Run tests
run: |
# Just a small subset of tests; this will be fine if the build
# succeeds (that's the real purpose of this job)
pushd $RUNNER_TEMP
python3.12 -m pytest --pyargs scipy.cluster
python3.12 -m pytest --pyargs scipy.linalg
popd
# Tests against a free-threaded (no-GIL) CPython 3.14t build, in two matrix
# legs: a plain full run (parallel=0) and a fast run under pytest-run-parallel
# (parallel=1) to exercise thread-safety.
# NOTE(review): this job has no `name:` key, unlike its siblings — the job id
# is shown in the UI instead; confirm that is intended.
free-threaded:
needs: get_commit_message
strategy:
matrix:
parallel: ['0', '1']
runs-on: ubuntu-latest
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
fetch-tags: true
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.14t-dev'
- name: Install Ubuntu dependencies
run: |
sudo apt-get update
sudo apt-get install -y gfortran
- name: Install Python dependencies
run: |
pip install -r requirements/build.txt
pip install -r requirements/openblas.txt
pip install spin pytest pytest-xdist threadpoolctl pooch hypothesis "click<8.3.0"
# pytest-run-parallel replaces pytest-xdist in the parallel leg.
- name: Install pytest-run-parallel
if: ${{ matrix.parallel == '1'}}
run: |
pip install pytest-run-parallel
pip uninstall --yes pytest-xdist
- name: Build SciPy
run: |
spin build --with-scipy-openblas
- name: Run tests (full)
if: ${{ matrix.parallel == '0'}}
run: |
spin test -j4 -m full --durations=10
- name: Run tests (fast, with pytest-run-parallel)
if: ${{ matrix.parallel == '1'}}
env:
# Excluded modules:
# - scipy.spatial has multiple issues in kdtree/qhull, and gh-20655 is pending.
TEST_SUBMODULES: >-
scipy._lib
scipy.cluster
scipy.constants
scipy.datasets
scipy.differentiate
scipy.fft
scipy.fftpack
scipy.integrate
scipy.interpolate
scipy.io
scipy.linalg
scipy.misc
scipy.ndimage
scipy.odr
scipy.optimize
scipy.signal
scipy.sparse
scipy.special
scipy.stats
run: |
# Note: Only fast tests; full test suite is unlikely to uncover anything more,
# and it'll be quite slow with pytest-run-parallel. Also skip
# tests that won't run in parallel, those are covered by the
# parallel == 0 run.
spin test -t $TEST_SUBMODULES -- --parallel-threads=4 --skip-thread-unsafe=true
#################################################################################
# Build-only job with the latest clang and -Werror, to surface new compiler
# warnings; the test suite is deliberately not run.
clang-17-build-only:
# Purpose is to check for warnings in builds with latest clang.
# We do not run the test suite here.
name: Clang-17 build-only (-Werror)
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Setup Python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.12"
- name: Setup system dependencies
run: |
sudo apt-get -y update
wget https://apt.llvm.org/llvm.sh
chmod u+x llvm.sh
sudo ./llvm.sh 17
sudo apt install -y libopenblas-dev liblapack-dev
- name: Setup Python build deps
run: |
pip install -r requirements/build.txt
pip install build
- name: Build wheel, check for compiler warnings
run: |
# specify which compilers to use using environment variables
CC=clang-17 CXX=clang++-17 FC=gfortran python -m build -wnx -Csetup-args=--werror
#################################################################################
# aarch64 fast-suite run; also the single designated job that enforces
# per-test time limits via pytest-fail-slow (see gh-20806).
test_aarch64:
name: aarch64, fast, fail slow, py3.12/npAny, pip+pytest
needs: get_commit_message
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: ubuntu-24.04-arm
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: Setup Python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.12'
- name: Install Ubuntu dependencies
run: |
sudo apt-get update
sudo apt-get install -y libopenblas-dev libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache libmpc-dev
- name: Install Python packages
run: |
python -m pip install -r requirements/build.txt -r requirements/test.txt
# We want to check for test timing only in a single job, on Linux, running the
# fast test suite. This is that job. See gh-20806 for previous issues
# after running this job on Windows and in multiple jobs.
python -m pip install pytest-fail-slow
- name: Install SciPy
run: |
python -m pip install . --no-build-isolation
- name: Test SciPy
run: |
export OMP_NUM_THREADS=2
cd ..
pytest --pyargs scipy -m 'not slow' --durations=0 --durations-min=0.5 --fail-slow=1.0

View File

@ -1,142 +0,0 @@
name: BLAS tests (Linux)

# This file is meant for testing LP64/ILP64 BLAS/LAPACK flavors and build
# options on Linux. All other yml files for Linux will only test LP64 BLAS/LAPACK flavors.
#
# Jobs and their purpose:
#
#   - mkl:
#     Tests MKL installed from PyPI (because easiest/fastest) in
#     2 ways: both LP64 and ILP64 via pkg-config.
#
#   - scipy-openblas64:
#     Tests ILP64-enabled build with scipy-openblas32 and scipy-openblas64.
#

on:
  pull_request:
    branches:
      - main
      - maintenance/**

defaults:
  run:
    shell: bash

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

permissions:
  contents: read  # to fetch code (actions/checkout)

jobs:
  get_commit_message:
    name: Get commit message
    uses: ./.github/workflows/commit_message.yml

  mkl-lp64:
    runs-on: ubuntu-latest
    name: "MKL LP64"
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
          fetch-depth: 0
      - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: '3.12'
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y gfortran
          pip install cython numpy pybind11 pythran pytest hypothesis pytest-xdist pooch
          pip install -r requirements/dev.txt
          pip install git+https://github.com/numpy/meson.git@main-numpymeson
          pip install mkl mkl-devel
      - name: Build with defaults (LP64)
        run: |
          pkg-config --libs mkl-dynamic-lp64-seq  # check link flags
          spin build -S-Dblas=mkl
      - name: Test
        run: spin test -j2

  mkl-ilp64:
    runs-on: ubuntu-latest
    name: "MKL ILP64"
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
          fetch-depth: 0
      - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: '3.12'
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y gfortran
          pip install cython numpy pybind11 pythran pytest hypothesis pytest-xdist pooch
          pip install -r requirements/dev.txt
          pip install git+https://github.com/numpy/meson.git@main-numpymeson
          pip install mkl mkl-devel
      - name: Build with ILP64
        run: |
          pkg-config --libs mkl-dynamic-ilp64-seq  # check link flags
          spin build -S-Dblas=mkl -S-Duse-ilp64=true
      - name: Test
        run: spin test -j2

  scipy-openblas-ilp64:
    runs-on: ubuntu-latest
    name: "scipy-openblas ILP64"
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
          fetch-depth: 0
      - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: '3.12'
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y gfortran
          pip install cython numpy pybind11 pythran pytest hypothesis pytest-xdist pooch
          pip install -r requirements/dev.txt
          pip install git+https://github.com/numpy/meson.git@main-numpymeson
          pip install scipy-openblas32 scipy-openblas64
      - name: Write out scipy-openblas64.pc
        run: |
          # spin does this for scipy-openblas32
          python -c'import scipy_openblas64 as so64; print(so64.get_pkg_config())' > scipy-openblas64.pc
      - name: Build with ILP64
        run: |
          spin build --with-scipy-openblas -S-Duse-ilp64=true
      - name: Test
        run: spin test -j2

View File

@ -1,125 +0,0 @@
name: Intel oneAPI tests

on:
  push:
    branches:
      - maintenance/**
  pull_request:
    branches:
      - main
      - maintenance/**
  schedule:
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    # │ │ │ │ │
    - cron: "9 9 2/2 * *"

permissions:
  contents: read  # to fetch code (actions/checkout)

# The environment variables LINUX_CPPKIT_URL, LINUX_FORTRANKIT_URL and
# LINUX_HPCKIT_URL store the URLs for downloading Intel oneAPI.
# Reference - https://github.com/oneapi-src/oneapi-ci/blob/b4a96bd1888c130fcb73524d2b77b90f43681cbc/.github/workflows/build_all.yml#L11-L12
env:
  LINUX_CPPKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/189a6c48-72de-4bef-a43c-eb1ee49787d6/intel-cpp-essentials-2025.0.1.27_offline.sh
  LINUX_FORTRANKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/9e86b555-f238-4dea-b4b2-01b243e42483/intel-fortran-essentials-2025.0.1.27_offline.sh
  LINUX_HPCKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/b7f71cf2-8157-4393-abae-8cea815509f7/intel-oneapi-hpc-toolkit-2025.0.1.47_offline.sh

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  get_commit_message:
    name: Get commit message
    uses: ./.github/workflows/commit_message.yml

  icx_icpx_ifx_mkl:
    name: py3.12, spin
    needs: get_commit_message
    # Ensure (a) this doesn't run on forks by default, and
    # (b) it does run with Act locally (`github` doesn't exist there)
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: ubuntu-24.04
    strategy:
      matrix:
        python-version: ["3.12"]
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      # Reference https://github.com/easimon/maximize-build-space/blob/master/action.yml
      # see https://github.com/scipy/scipy/issues/23813
      - name: Free up disk space
        run: |
          df -h
          sudo rm -rf /usr/local/lib/android
          df -h
      - name: cache install
        id: cache-install
        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
        with:
          path: |
            /opt/intel/oneapi/
          key: install-${{ env.LINUX_CPPKIT_URL }}-${{ env.LINUX_FORTRANKIT_URL }}-${{ env.LINUX_HPCKIT_URL }}-compiler
      - name: Install oneAPI CPP kit
        if: steps.cache-install.outputs.cache-hit != 'true'
        run: |
          printenv LINUX_CPPKIT_URL
          bash tools/install_intel_oneAPI_linux.sh $LINUX_CPPKIT_URL
      - name: Install oneAPI Fortran kit
        if: steps.cache-install.outputs.cache-hit != 'true'
        run: |
          printenv LINUX_FORTRANKIT_URL
          bash tools/install_intel_oneAPI_linux.sh $LINUX_FORTRANKIT_URL
      - name: Install oneAPI HPC kit
        if: steps.cache-install.outputs.cache-hit != 'true'
        run: |
          printenv LINUX_HPCKIT_URL
          bash tools/install_intel_oneAPI_linux.sh $LINUX_HPCKIT_URL
      - name: Setup Conda
        uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1
        with:
          python-version: ${{ matrix.python-version }}
          channels: conda-forge
          channel-priority: true
          activate-environment: scipy-dev
          use-only-tar-bz2: false
          miniforge-variant: Miniforge3
          miniforge-version: latest
          use-mamba: true
      - name: Update Conda Environment
        shell: bash -l {0}
        run: |
          conda activate scipy-dev
          conda install -c conda-forge pkg-config meson meson-python ninja numpy cython pybind11 pytest pytest-xdist pytest-timeout pooch hypothesis spin "click<8.3.0"
      - name: Initialise Intel oneAPI and Build SciPy
        shell: bash -l {0}
        run: |
          . /opt/intel/oneapi/setvars.sh --force
          conda activate scipy-dev
          # Note that we have to install Pythran from PyPI to avoid pulling in compilers
          # from conda-forge (those may break Intel Fortran, which rely on system GCC/Clang,
          # xref https://github.com/conda-forge/pythran-feedstock/issues/77).
          pip install pythran
          unset FFLAGS
          CC=icx CXX=icpx FC=ifx spin build -- -Dblas=mkl-dynamic-lp64-iomp -Dlapack=mkl-dynamic-lp64-iomp
      - name: Test scipy
        shell: bash -l {0}
        run: |
          . /opt/intel/oneapi/setvars.sh --force
          conda activate scipy-dev
          spin test

View File

@ -1,212 +0,0 @@
name: macOS tests

on:
  push:
    branches:
      - maintenance/**
  pull_request:
    branches:
      - main
      - maintenance/**

permissions:
  contents: read  # to fetch code (actions/checkout)

env:
  INSTALLDIR: "build-install"
  CCACHE_DIR: "${{ github.workspace }}/.ccache"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  get_commit_message:
    name: Get commit message
    uses: ./.github/workflows/commit_message.yml

  test_meson:
    name: Conda & umfpack/scikit-sparse, fast, py3.11/npAny, spin
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
        with:
          pixi-version: v0.55.0
          cache: false
          environments: py312-system-libs-osx
      - name: Set up ccache
        uses: ./.github/ccache
        with:
          workflow_name: ${{ github.workflow }}
      - name: Test SciPy
        shell: bash -l {0}
        run: |
          rm -rf ../../subprojects/boost_math  # so will fail if system boost doesn't work
          rm -rf ../../subprojects/qhull_r  # so will fail if system qhull doesn't work
          export OMP_NUM_THREADS=2
          pixi run test-system-libs -j2
      - name: Ccache statistics
        shell: bash -l {0}
        run: pixi exec ccache -s

  test_scipy_openblas:
    name: M1 & OpenBLAS, fast, py3.11/npAny, spin
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: macos-14
    strategy:
      matrix:
        python-version: ["3.11"]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
        with:
          pixi-version: v0.55.0
          cache: false
          environments: scipy-openblas
      - name: Test with scipy-openblas32
        run: pixi run test-scipy-openblas

  test_accelerate:
    name: Accelerate, full, py3.13/npAny, spin
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: macos-15
    strategy:
      matrix:
        environment: [accelerate-lp64, accelerate-ilp64]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
        with:
          pixi-version: v0.55.0
          cache: false
          environments: ${{ matrix.environment }}
      - name: Test SciPy
        # mpmath tests are too slow for CI
        run: pixi run test-${{ matrix.environment }} -m full -- -k "not mpmath"

  test_macos_x86_64_accelerate:
    name: macos15, x86_64, accelerate
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: macos-15-intel
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - name: Setup Python
        uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: "3.13"
          cache: 'pip'
          allow-prereleases: true
      - name: Test SciPy
        run: |
          export FC=gfortran-13
          pip install -r requirements/build.txt
          pip install -r requirements/dev.txt
          spin build --with-accelerate
          pip install -r requirements/test.txt
          spin test

  clang_ASAN:
    name: Test under AddressSanitizer
    runs-on: macos-15
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
          fetch-tags: true
          persist-credentials: false
      - name: Set up pyenv
        run: |
          git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv"
          PYENV_ROOT="$HOME/.pyenv"
          PYENV_BIN="$PYENV_ROOT/bin"
          PYENV_SHIMS="$PYENV_ROOT/shims"
          echo "$PYENV_BIN" >> $GITHUB_PATH
          echo "$PYENV_SHIMS" >> $GITHUB_PATH
          echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV
      - name: Set up LLVM
        run: |
          brew install llvm@19
          LLVM_PREFIX=$(brew --prefix llvm@19)
          echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV
          echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV
          echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV
          echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV
      - name: Build Python with AddressSanitizer
        run: |
          CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t
          pyenv global 3.14t
      - name: Install NumPy dependencies from PyPI
        run: |
          pip install meson-python ninja cython
      - name: Build NumPy with ASan
        run:
          pip install numpy --no-binary numpy --no-build-isolation -Csetup-args="-Db_sanitize=address" -v
      - name: Install SciPy dependencies from PyPI
        run: |
          pip install pybind11 pythran spin pooch hypothesis pytest pytest-timeout
      - name: Make gfortran-13 on runner image usable
        run: |
          # Ensure we use gfortran-13 and that its runtime is on the library search path
          FC=$(which gfortran-13)
          GFORTRAN_LIB=$(dirname `$FC --print-file-name libgfortran.dylib`)
          echo DYLD_LIBRARY_PATH=$GFORTRAN_LIB >> "$GITHUB_ENV"
          echo FC=$FC >> "$GITHUB_ENV"
      - name: Build SciPy with ASan
        run: |
          # Can't use Meson's `-Db_sanitize=address` because gfortran-13 will choke on it
          export CFLAGS="-fsanitize=address -fno-omit-frame-pointer -fsanitize-ignorelist=$(pwd)/tools/asan-ignore.txt -Wno-deprecated-declarations"
          export CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -fsanitize-ignorelist=$(pwd)/tools/asan-ignore.txt -Wno-deprecated-declarations"
          spin build -S-Dblas=accelerate
      - name: Test
        run: |
          # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them
          ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0 \
          spin test -- -v -s --timeout=600 --durations=10 -m 'not fail_asan'

View File

@ -1,86 +0,0 @@
name: Test musllinux_x86_64

on:
  push:
    branches:
      - maintenance/**
  pull_request:
    branches:
      - main
      - maintenance/**

permissions:
  contents: read  # to fetch code (actions/checkout)

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  get_commit_message:
    name: Get commit message
    uses: ./.github/workflows/commit_message.yml

  musllinux_x86_64:
    name: musl Ubuntu-latest, fast, py3.12/npAny, spin
    needs: get_commit_message
    runs-on: ubuntu-latest
    # If using act to run CI locally the github object does not exist and
    # the usual skipping should not be enforced
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    container:
      # Use container used for building musllinux wheels
      # it has git installed, all the pythons, etc
      image: quay.io/pypa/musllinux_1_2_x86_64
    steps:
      - name: Get source
        run: |
          apk update --quiet
          apk add build-base gfortran git
          git config --global --add safe.directory $PWD
          if [ $GITHUB_EVENT_NAME != pull_request ]; then
            git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE
            git reset --hard $GITHUB_SHA
          else
            git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE
            git fetch origin $GITHUB_REF:my_ref_name
            git checkout $GITHUB_BASE_REF
            git -c user.email="you@example.com" merge --no-commit my_ref_name
          fi
          ln -s /usr/local/bin/python3.12 /usr/local/bin/python
          git submodule update --init
      - name: prep build environment
        run: |
          cd $RUNNER_TEMP
          python -m venv test_env
          source test_env/bin/activate
          cd $GITHUB_WORKSPACE
          python -m pip install cython numpy
          # python -m pip install --upgrade --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy
          python -m pip install meson ninja pybind11 pythran pytest hypothesis spin pooch "click<8.3.0"
          python -m pip install -r requirements/openblas.txt
          chmod +x tools/wheels/cibw_before_build_linux.sh
          tools/wheels/cibw_before_build_linux.sh --nightly .
      - name: test
        run: |
          set -xe -o
          cd $RUNNER_TEMP
          source test_env/bin/activate
          cd $GITHUB_WORKSPACE
          export PKG_CONFIG_PATH=$PWD
          spin test

View File

@ -1,30 +0,0 @@
name: "Pull Request Labeler"
on:
pull_request_target:
types: [opened]
jobs:
label_pull_request:
# Permissions needed for labelling Pull Requests automatically
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
# label based on changed files
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0
continue-on-error: true
if: github.repository == 'scipy/scipy'
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: ".github/label-globs.yml"
# label based on PR title
- uses: github/issue-labeler@c1b0f9f52a63158c4adc09425e858e87b32e9685 # v3.4
if: github.repository == 'scipy/scipy'
with:
configuration-path: .github/labeler.yml
include-title: 1
include-body: 0
enable-versioned-regex: 0
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,263 +0,0 @@
# Workflow to build and test wheels.
# To work on the wheel building infrastructure on a fork, comment out:
#
#   if: github.repository == 'scipy/scipy'
#
# in the get_commit_message job include [wheel build] in your commit
# message to trigger the build. All files related to wheel building are located
# at tools/wheels/
name: Wheel builder

on:
  schedule:
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌──────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    # │ │ │ │ │
    - cron: "9 9 */2 * *"
  push:
    branches:
      - maintenance/**
  pull_request:
    branches:
      - main
      - maintenance/**
  workflow_dispatch:

permissions:
  contents: read  # to fetch code (actions/checkout)

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  get_commit_message:
    name: Get commit message
    runs-on: ubuntu-latest
    if: github.repository == 'scipy/scipy'
    outputs:
      message: ${{ steps.commit_message.outputs.message }}
    steps:
      - name: Checkout scipy
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        # Gets the correct commit message for pull request
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Get commit message
        id: commit_message
        run: |
          set -xe
          COMMIT_MSG=$(git log --no-merges -1)
          RUN="0"
          if [[ "$COMMIT_MSG" == *"[wheel build]"* ]]; then
            RUN="1"
          fi
          echo "message=$RUN" >> $GITHUB_OUTPUT

  build_wheels:
    name: Wheel, ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}
      ${{ matrix.buildplat[2] }} ${{ matrix.buildplat[3] }}
      ${{ matrix.buildplat[4] }}
    needs: get_commit_message
    if: >-
      contains(needs.get_commit_message.outputs.message, '1') ||
      github.event_name == 'schedule' ||
      github.event_name == 'workflow_dispatch'
    runs-on: ${{ matrix.buildplat[0] }}
    strategy:
      # Ensure that a wheel builder finishes even if another fails
      fail-fast: false
      matrix:
        # Github Actions doesn't support pairing matrix values together, let's improvise
        # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026
        buildplat:
          # should also be able to do multi-archs on a single entry, e.g.
          # [windows-2025, win*, "AMD64 x86"]. However, those two require a different compiler setup
          # so easier to separate out here.
          - [ubuntu-22.04, manylinux, x86_64, "", ""]
          - [ubuntu-22.04, musllinux, x86_64, "", ""]
          - [ubuntu-24.04-arm, manylinux, aarch64, "", ""]
          - [ubuntu-24.04-arm, musllinux, aarch64, "", ""]
          - [macos-15-intel, macosx, x86_64, openblas, "10.14"]
          - [macos-15-intel, macosx, x86_64, accelerate, "14.0"]
          - [macos-14, macosx, arm64, openblas, "12.3"]
          - [macos-14, macosx, arm64, accelerate, "14.0"]
          - [windows-2025, win, AMD64, "", ""]
          - [windows-11-arm, win, ARM64, "", ""]
        # python[0] is used to specify the python versions made by cibuildwheel
        # NOTE(review): second element of each pair is the CPython version string;
        # the cp314 entries previously repeated the build tag ("cp314") instead of
        # "3.14", breaking the ["cpXYZ", "X.Y"] pattern of every other entry.
        python: [["cp312", "3.12"], ["cp313", "3.13"], ["cp313t", "3.13"], ["cp314", "3.14"], ["cp314t", "3.14"]]
    env:
      IS_32_BIT: ${{ matrix.buildplat[2] == 'x86' }}
      # upload to staging if it's a push to a maintenance branch and the last
      # commit message contains '[wheel build]'
      IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/heads/maintenance') && contains(needs.get_commit_message.outputs.message, '1') }}
      IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
    steps:
      - name: Checkout scipy
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: true
      - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: 3.12
      - name: win_amd64 - install rtools
        if: ${{ matrix.buildplat[1] == 'win' && matrix.buildplat[2] == 'AMD64' }}
        run: |
          # mingw-w64
          choco install rtools -y --no-progress --force --version=4.0.0.20220206
          echo "c:\rtools40\ucrt64\bin;" >> $env:GITHUB_PATH
      - name: windows - set PKG_CONFIG_PATH
        if: ${{ matrix.buildplat[1] == 'win' }}
        run: |
          $env:CIBW = "${{ github.workspace }}"
          # It seems somewhere in the env passing, `\` is not
          # passed through, so convert it to '/'
          $env:CIBW=$env:CIBW.replace("\","/")
          echo "CIBW_ENVIRONMENT=PKG_CONFIG_PATH=$env:CIBW" >> $env:GITHUB_ENV
      - name: Set environment variables for ARM64
        if: matrix.buildplat[1] == 'win' && matrix.buildplat[2] == 'ARM64'
        run: |
          echo "CC=clang-cl" >> $env:GITHUB_ENV
          echo "CXX=clang-cl" >> $env:GITHUB_ENV
          echo "FC=flang" >> $env:GITHUB_ENV
          echo "TARGET_ARCH=${{ matrix.buildplat[2] }}" >> $env:GITHUB_ENV
      - name: Set up Flang and pkgconf for ARM64
        if: matrix.buildplat[1] == 'win' && matrix.buildplat[2] == 'ARM64'
        uses: ./.github/windows_arm64_steps
      - name: Setup macOS
        if: startsWith( matrix.buildplat[0], 'macos-' )
        run: |
          if [[ ${{ matrix.buildplat[3] }} == 'accelerate' ]]; then
            echo CIBW_CONFIG_SETTINGS=\"setup-args=-Dblas=accelerate\" >> "$GITHUB_ENV"
            # Always use preinstalled gfortran for Accelerate builds
            ln -s $(which gfortran-13) gfortran
            export PATH=$PWD:$PATH
            echo "PATH=$PATH" >> "$GITHUB_ENV"
            LIB_PATH=$(dirname $(gfortran --print-file-name libgfortran.dylib))
          fi
          # XCODE_VER='16.0'
          # CIBW="sudo xcode-select -s /Applications/Xcode_${XCODE_VER}.app"
          # echo "CIBW_BEFORE_ALL=$CIBW" >> $GITHUB_ENV
          # setting SDKROOT necessary when using the gfortran compiler
          # installed in cibw_before_build_macos.sh
          # sudo xcode-select -s /Applications/Xcode_${XCODE_VER}.app
          CIBW="MACOSX_DEPLOYMENT_TARGET=${{ matrix.buildplat[4] }}\
           SDKROOT=$(xcrun --sdk macosx --show-sdk-path)\
           PKG_CONFIG_PATH=${{ github.workspace }}"
          echo "CIBW_ENVIRONMENT=$CIBW" >> "$GITHUB_ENV"
          echo "REPAIR_PATH=$LIB_PATH" >> "$GITHUB_ENV"
          PREFIX=DYLD_LIBRARY_PATH="\$(dirname \$(gfortran --print-file-name libgfortran.dylib))"
          # remove libgfortran from location used for linking (if any), to
          # check wheel has bundled things correctly and all tests pass without
          # needing installed gfortran
          POSTFIX=" sudo rm -rf /opt/gfortran-darwin-x86_64-native &&\
           sudo rm -rf /usr/local/gfortran/lib"
          CIBW="$PREFIX delocate-listdeps -d {wheel} && echo "-----------" &&\
           $PREFIX delocate-wheel -v $EXCLUDE --require-archs \
           {delocate_archs} -w {dest_dir} {wheel} && echo "-----------" &&\
           delocate-listdeps -d {dest_dir}/*.whl && echo "-----------" &&\
           $POSTFIX"
          # macos-arm64-openblas wheels that target macos-12 need a
          # MACOS_DEPLOYMENT_TARGET of 12.3 otherwise delocate complains.
          # Unclear of cause, possibly build tool related.
          # This results in wheels that have 12_3 in their name. Since Python
          # has no concept of minor OS versions in packaging land rename the
          # wheel back to 12.
          if [[ ${{ matrix.buildplat[0] }} == 'macos-14' && ${{ matrix.buildplat[4] }} == '12.3' ]]; then
            CIBW+=" && echo \$(ls {dest_dir}) && \
             mv {dest_dir}/*.whl \$(find {dest_dir} -type f -name '*.whl' | sed 's/12_3/12_0/')"
          fi
          echo "CIBW_REPAIR_WHEEL_COMMAND_MACOS=$CIBW" >> "$GITHUB_ENV"
      # - name: Inject environment variable for python dev version
      #   if: ${{ matrix.python[1] == '3.14-dev'
      #   shell: bash
      #   run: |
      #     # For dev versions of python need to use wheels from scientific-python-nightly-wheels
      #     # When the python version is released please comment out this section, but do not remove
      #     # (there will soon be another dev version to target).
      #     DEPS0="pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy"
      #     DEPS1="pip install ninja meson-python pybind11 pythran cython"
      #     CIBW="$DEPS0;$DEPS1;bash {project}/tools/wheels/cibw_before_build_linux.sh {project}"
      #     echo "CIBW_BEFORE_BUILD_LINUX=$CIBW" >> "$GITHUB_ENV"
      #     CIBW="$DEPS0 && $DEPS1 && bash {project}/tools/wheels/cibw_before_build_win.sh {project}"
      #     echo "CIBW_BEFORE_BUILD_WINDOWS=$CIBW" >> "$GITHUB_ENV"
      #     CIBW="$DEPS0;$DEPS1;bash {project}/tools/wheels/cibw_before_build_macos.sh {project}"
      #     echo "CIBW_BEFORE_BUILD_MACOS=$CIBW" >> "$GITHUB_ENV"
      #     echo "CIBW_BEFORE_TEST=$DEPS0" >> "$GITHUB_ENV"
      #     CIBW="build; args: --no-isolation"
      #     echo "CIBW_BUILD_FRONTEND=$CIBW" >> "$GITHUB_ENV"
      - name: Build wheels
        uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0
        env:
          CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}*
          CIBW_ARCHS: ${{ matrix.buildplat[2] }}
          CIBW_ENABLE: cpython-freethreading cpython-prerelease
      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          path: ./wheelhouse/*.whl
          name: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}
            ${{ matrix.buildplat[2] }} ${{ matrix.buildplat[3] }}
            ${{ matrix.buildplat[4] }}
      - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1
        with:
          # for installation of anaconda-client, required for upload to
          # anaconda.org
          # default (and activated) environment name is test
          # Note that this step is *after* specific pythons have been used to
          # build and test the wheel
          auto-update-conda: true
          python-version: "3.12"
          miniforge-version: latest
          conda-remove-defaults: "true"
          architecture: ${{ (matrix.buildplat[1] == 'win' && matrix.buildplat[2] == 'ARM64') && 'x64' || '' }}
      - name: Upload wheels
        if: success()
        shell: bash -el {0}
        # see https://github.com/marketplace/actions/setup-miniconda for why
        # `-el {0}` is required.
        env:
          SCIPY_STAGING_UPLOAD_TOKEN: ${{ secrets.SCIPY_STAGING_UPLOAD_TOKEN }}
          SCIPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.SCIPY_NIGHTLY_UPLOAD_TOKEN }}
        run: |
          conda install -y anaconda-client
          source tools/wheels/upload_wheels.sh
          set_upload_vars
          # For cron jobs (restricted to main branch) or "Run workflow" trigger
          # an upload to:
          #
          # https://anaconda.org/scientific-python-nightly-wheels/scipy
          #
          # Pushes to a maintenance branch that contain '[wheel build]' will
          # cause wheels to be built and uploaded to:
          #
          # https://anaconda.org/multibuild-wheels-staging/scipy
          #
          # The tokens were originally generated at anaconda.org
          upload_wheels

View File

@ -1,257 +0,0 @@
name: Windows tests

on:
  push:
    branches:
      - maintenance/**
  pull_request:
    branches:
      - main
      - maintenance/**

permissions:
  contents: read  # to fetch code (actions/checkout)

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  get_commit_message:
    name: Get commit message
    uses: ./.github/workflows/commit_message.yml

  fast_spin:
    name: fast, py3.12/npAny, spin
    needs: get_commit_message
    # Ensure (a) this doesn't run on forks by default, and
    # (b) it does run with Act locally (`github` doesn't exist there)
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: windows-2025
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
        with:
          pixi-version: v0.56.0
          cache: false
          environments: >-
            build
            test
      - name: Build
        run: pixi run build
      - name: Test
        run: |
          # test runner parallel clashes with OpenBLAS multithreading
          $env:OPENBLAS_NUM_THREADS=1
          pixi run test -j4 -- --durations=25

  #############################################################################
  full_spin_min_numpy:
    name: full, py3.12/npMin, spin
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: windows-2025
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - name: Setup Python
        uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: "3.12"
          cache: "pip"
          cache-dependency-path: "environment.yml"
      - name: Install rtools (mingw-w64)
        run: |
          choco install rtools -y --no-progress --force --version=4.0.0.20220206
          echo "c:\rtools40\ucrt64\bin;" >> $env:GITHUB_PATH
      - name: pip-packages
        run: |
          # 2.0.0 is currently our oldest supported NumPy version
          python -m pip install numpy==2.0.0 cython pybind11 pythran meson-python meson ninja pytest pytest-xdist pytest-timeout pooch spin hypothesis "click<8.3.0"
          python -m pip install -r requirements/openblas.txt
      - name: Build
        run: |
          spin build --with-scipy-openblas
      - name: Test
        run: |
          # test runner parallel clashes with OpenBLAS multithreading
          $env:OPENBLAS_NUM_THREADS=1
          spin test -j4 --mode full -- --durations=25 --timeout=60

  #############################################################################
  full_build_sdist_wheel:
    # TODO: enable ILP64 once possible
    name: no pythran & sdist+wheel, full, py3.12/npPre, pip+pytest
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1
      && (github.repository == 'scipy/scipy' || github.repository == '')
    runs-on: windows-2025
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - name: Setup Python
        uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: "3.12"
          cache: "pip"
          cache-dependency-path: "environment.yml"
      - name: Win_amd64 - install rtools
        run: |
          # mingw-w64
          choco install rtools -y --no-progress --force --version=4.0.0.20220206
          echo "c:\rtools40\ucrt64\bin;" >> $env:GITHUB_PATH
      - name: Install OpenBLAS
        shell: bash
        run: |
          set -xe
          python -m pip install -r requirements/openblas.txt
          bash tools/wheels/cibw_before_build_win.sh .
          echo "PKG_CONFIG_PATH=${{ github.workspace }}" >> $GITHUB_ENV
      - name: pip-packages
        run: |
          python -m pip install build delvewheel numpy cython pybind11 meson-python meson ninja pytest pytest-xdist pytest-timeout pooch hypothesis
      - name: Build
        shell: bash
        run: |
          python -m build -Csetup-args="-Duse-pythran=false"
          # Vendor openblas.dll and the DLL's it depends on into the wheel
          # Ignore `libsf_error_state.dll` for special function error handling;
          # it will be loaded using ctypes in scipy/special/__init__.py.
          wheel_name=$(ls dist/*.whl)
          openblas_dir=$(python -c"import scipy_openblas32 as sop; print(sop.get_lib_dir())")
          delvewheel repair --add-path $openblas_dir --no-dll libsf_error_state.dll -w wheelhouse $wheel_name
          python -m pip install wheelhouse/*
      - name: Test
        run: |
          cd $RUNNER_TEMP
          # run full test suite
          # test runner parallel clashes with OpenBLAS multithreading
          $env:OPENBLAS_NUM_THREADS=1
          pytest --pyargs scipy

  #############################################################################
  fast_spin_arm64:
    name: fast, py3.12/npAny, spin (Win-ARM64)
    runs-on: windows-11-arm
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1 &&
      (github.repository == 'scipy/scipy' || github.repository == '')
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - name: Setup Python
        uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: "3.12"
          architecture: "arm64"
      - name: Set up Flang and pkgconf for ARM64
        uses: ./.github/windows_arm64_steps
      - name: pip-packages
        run: |
          pip install numpy cython pybind11 pythran meson ninja pytest pytest-xdist pytest-timeout pooch spin hypothesis "click<8.3.0"
          python -m pip install -r requirements/openblas.txt
      - name: Build
        run: |
          $env:cc = "clang-cl"
          $env:cxx = "clang-cl"
          $env:fc = "flang-new"
          spin build --with-scipy-openblas
      - name: Test
        run: |
          $env:OPENBLAS_NUM_THREADS = 1
          spin test -j2 -- --durations=25

  #############################################################################
  full_build_sdist_wheel_arm64:
    name: no pythran & sdist+wheel, full, py3.12/npPre, pip+pytest (Win-ARM64)
    runs-on: windows-11-arm
    needs: get_commit_message
    if: >
      needs.get_commit_message.outputs.message == 1 &&
      (github.repository == 'scipy/scipy' || github.repository == '')
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: recursive
      - name: Setup Python
        uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: "3.12"
          architecture: "arm64"
          cache: "pip"
          cache-dependency-path: "environment.yml"
      - name: Set up Flang and pkgconf for ARM64
        uses: ./.github/windows_arm64_steps
      - name: Install OpenBLAS
        shell: bash
        run: |
          set -xe
          export PKG_CONFIG_PATH=$(cygpath -u "$PKG_CONFIG_PATH")
          export PATH="$(cygpath -u 'C:\vcpkg\installed\arm64-windows\tools\pkgconf'):$PATH"
          python -m pip install -r requirements/openblas.txt
          bash tools/wheels/cibw_before_build_win.sh .
          echo "PKG_CONFIG_PATH=$(cygpath -w "${{ github.workspace }}")" >> $GITHUB_ENV
      - name: pip-packages
        run: |
          python -m pip install build delvewheel cython pybind11 meson-python meson ninja pytest pytest-xdist pytest-timeout pooch hypothesis numpy
      - name: Build
        shell: bash
        run: |
          set -xe
          export CC=clang-cl
          export CXX=clang-cl
          export FC=flang-new
          export PATH="$(cygpath -u 'C:\vcpkg\installed\arm64-windows\tools\pkgconf'):$PATH"
          python -m build --no-isolation -x -Csetup-args="-Duse-pythran=false"
          wheel_name=$(ls dist/*.whl)
          openblas_dir=$(python -c"import scipy_openblas32 as sop; print(sop.get_lib_dir())")
          delvewheel repair --add-path "$openblas_dir" -w wheelhouse "$wheel_name"
          python -m pip install wheelhouse/*
      - name: Test
        shell: pwsh
        run: |
          Set-Location $env:RUNNER_TEMP
          $env:OPENBLAS_NUM_THREADS = 1
          pytest --pyargs scipy -n2

View File

@ -1,115 +0,0 @@
name: Windows tests (MSVC + ifx + OpenBLAS)
# TODO: replace OpenBLAS with MKL. This is harder to get to build, so we merged with OpenBLAS first.
on:
push:
branches:
- maintenance/**
pull_request:
branches:
- main
- maintenance/**
schedule:
# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
# │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
# │ │ │ │ │
- cron: "9 9 2/2 * *"
permissions:
contents: read # to fetch code (actions/checkout)
# The environment variable WINDOWS_BASEKIT_URL and WINDOWS_HPC_URL
# store the URL for downloading Intel oneAPI.
# Reference - https://github.com/oneapi-src/oneapi-ci/blob/b4a96bd1888c130fcb73524d2b77b90f43681cbc/.github/workflows/build_all.yml#L11-L12
env:
WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/0a91e6dc-9c09-4e0f-9113-1f28bf7e8be2/intel-fortran-essentials-2025.0.1.28_offline.exe
WINDOWS_HPC_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/a37c30c3-a846-4371-a85d-603e9a9eb94c/intel-oneapi-hpc-toolkit-2025.0.1.48_offline.exe
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
get_commit_message:
name: Get commit message
uses: ./.github/workflows/commit_message.yml
msvc_ifx_blas:
name: py3.12, spin
needs: get_commit_message
# Ensure (a) this doesn't run on forks by default, and
# (b) it does run with Act locally (`github` doesn't exist there)
if: >
needs.get_commit_message.outputs.message == 1
&& (github.repository == 'scipy/scipy' || github.repository == '')
runs-on: windows-2022
defaults:
run:
shell: powershell
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: cache install
id: cache-install
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: |
C:\Program Files (x86)\Intel\oneAPI\compiler
C:\Program Files (x86)\Intel\oneAPI\setvars.bat
C:\Program Files (x86)\Intel\oneAPI\setvars-vcvarsall.bat
key: install-${{ env.WINDOWS_HPC_URL }}-${{ env.WINDOWS_BASEKIT_URL }}-compiler
- name: Install oneAPI Base kit
if: steps.cache-install.outputs.cache-hit != 'true'
run: |
echo %WINDOWS_BASEKIT_URL%
tools/install_intel_oneAPI_windows.bat %WINDOWS_BASEKIT_URL%
- name: Install oneAPI HPC kit
if: steps.cache-install.outputs.cache-hit != 'true'
run: |
echo %WINDOWS_HPC_URL%
tools/install_intel_oneAPI_windows.bat %WINDOWS_HPC_URL%
- name: Setup Conda
uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1
with:
python-version: 3.12
channels: conda-forge
channel-priority: strict
use-only-tar-bz2: false
miniforge-variant: Miniforge3
miniforge-version: latest
auto-activate-base: true
activate-environment: true
- run: conda --version
- run: which python
- name: Install packages from conda
shell: cmd /C call {0}
run: |
conda install -c conda-forge pkg-config meson meson-python ninja openblas libblas=*=*openblas numpy==2.0 cython pybind11 pytest pytest-xdist pytest-timeout pooch spin hypothesis "click<8.3.0"
# MSVC is unable to compile Pythran code, therefore we need to use
# -C-Duse-pythran=false while building SciPy.
# Reference - https://github.com/serge-sans-paille/pythran/issues/2215
- name: Initialise Intel oneAPI, MSVC and Build SciPy
shell: cmd /C call {0}
run: |
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set FC=ifx
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
spin build -S-Duse-pythran=false -S--vsenv
# "import scipy; scipy.test();" fails because
# scipy/sparse/linalg/_eigen/arpack crashes.
# Reference - https://github.com/scipy/scipy/issues/20728
- name: Test scipy.datasets
shell: cmd /C call {0}
run: |
spin test -s datasets

139
.gitignore vendored
View File

@ -5,23 +5,18 @@
*~
*$
*.bak
.idea/
.idea/*
*.kdev4
*.org
.project
.pydevproject
*.rej
.settings/
.spyproject/
.*.sw[nop]
.sw[nop]
*.tmp
*.vim
tags
.venv/
venv/
.theia/
.vscode/
# Compiled source #
###################
@ -56,14 +51,13 @@ _configtest.c
# Python files #
################
# build directories
# setup.py working directory
build
build-*
# sphinx build directory
doc/_build
# cython files
cythonize.dat
# sdist directory
# setup.py dist directory
dist
# Egg metadata
*.egg-info
@ -80,22 +74,10 @@ setup.cfg
.deps
.libs
.eggs
pip-wheel-metadata
# Meson #
#########
.mesonpy-native-file.ini
installdir/
.mesonpy/
.wraplock
# doit
######
.doit.db.dat
.doit.db.dir
.doit.db.db
.doit.db
doc/source/.jupyterlite.doit.db
# Paver generated files #
#########################
/release
# Logs and databases #
######################
@ -105,21 +87,8 @@ doc/source/.jupyterlite.doit.db
# pytest cache #
################
.cache/
.pytest_cache/
# GitHub cache #
################
gh_cache.json
# mypy cache #
##############
.mypy_cache/
# linter #
##########
.ruff_cache/
.pre-commit-workdir/
.cache/*
.pytest_cache/*
# Patches #
###########
@ -141,9 +110,7 @@ Thumbs.db
# Documentation generated files #
#################################
doc/frontpage/build
doc/source/reference/generated
**/.ipynb_checkpoints
doc/source/_contents
doc/source/generated
# Things specific to this project #
###################################
@ -152,34 +119,29 @@ benchmarks/results
benchmarks/scipy
benchmarks/html
benchmarks/scipy-benchmarks
.github/workflows/.pixi
.openblas
scipy-openblas.pc
scipy/_distributor_init_local.py
scipy/__config__.py
scipy/_lib/_ccallback_c.c
scipy/_lib/messagestream.c
scipy/_lib/src/messagestream_config.h
scipy/_lib/_test_deprecation_call.c
scipy/_lib/_test_deprecation_def.c
scipy/_lib/_test_deprecation_def.h
scipy/cluster/_vq.c
scipy/cluster/_hierarchy.c
scipy/cluster/_optimal_leaf_ordering.c
scipy/fftpack/_fftpackmodule.c
scipy/fftpack/convolvemodule.c
scipy/fftpack/convolve.c
scipy/fftpack/src/dct.c
scipy/fftpack/src/dst.c
scipy/interpolate/_rbfinterp_pythran.cpp
scipy/integrate/_dopmodule.c
scipy/integrate/lsodamodule.c
scipy/integrate/vodemodule.c
scipy/interpolate/_ppoly.c
scipy/interpolate/_rgi_cython.c
scipy/interpolate/_bspl.c
scipy/interpolate/interpnd.c
scipy/interpolate/src/dfitpack-f2pywrappers.f
scipy/interpolate/src/dfitpackmodule.c
scipy/io/_test_fortranmodule.c
scipy/io/matlab/_mio5_utils.c
scipy/io/matlab/_mio_utils.c
scipy/io/matlab/_streams.c
scipy/io/matlab/mio5_utils.c
scipy/io/matlab/mio_utils.c
scipy/io/matlab/streams.c
scipy/lib/blas/cblas.pyf
scipy/lib/blas/cblasmodule.c
scipy/lib/blas/fblas-f2pywrappers.f
@ -200,7 +162,6 @@ scipy/linalg/_interpolativemodule.c
scipy/linalg/_solve_toeplitz.c
scipy/linalg/_decomp_update.c
scipy/linalg/_decomp_update.pyx
scipy/linalg/_cythonized_array_utils.c
scipy/linalg/_blas_subroutine_wrappers.f
scipy/linalg/_blas_subroutines.h
scipy/linalg/_lapack_subroutine_wrappers.f
@ -215,52 +176,59 @@ scipy/linalg/cython_blas.pxd
scipy/linalg/cython_blas.pyx
scipy/linalg/cython_lapack.pxd
scipy/linalg/cython_lapack.pyx
scipy/linalg/_matfuncs_sqrtm_triu.c
scipy/linalg/_matfuncs_sqrtm_triu.cpp
scipy/linalg/src/id_dist/src/*_subr_*.f
scipy/ndimage/src/_ni_label.c
scipy/ndimage/src/_cytest.c
scipy/optimize/_bglu_dense.c
scipy/optimize/cobyla/_cobylamodule.c
scipy/optimize/_group_columns.cpp
scipy/optimize/lbfgsb/_lbfgsbmodule.c
scipy/optimize/minpack2/minpack2module.c
scipy/optimize/nnls/_nnlsmodule.c
scipy/optimize/slsqp/_slsqpmodule.c
scipy/optimize/_lsq/givens_elimination.c
scipy/optimize/_trlib/_trlib.c
scipy/optimize/tnc/moduleTNC.c
scipy/optimize/tnc/_moduleTNC.c
scipy/signal/_peak_finding_utils.c
scipy/signal/_spectral.c
scipy/signal/_max_len_seq_inner.c
scipy/signal/_max_len_seq_inner.cpp
scipy/signal/_sosfilt.c
scipy/signal/_upfirdn_apply.c
scipy/signal/_correlate_nd.c
scipy/signal/_lfilter.c
scipy/signal/_bspline_util.c
scipy/signal/correlate_nd.c
scipy/signal/lfilter.c
scipy/sparse/_csparsetools.c
scipy/sparse/_csparsetools.pyx
scipy/sparse/csgraph/_min_spanning_tree.c
scipy/sparse/csgraph/_shortest_path.cxx
scipy/sparse/csgraph/_shortest_path.c
scipy/sparse/csgraph/_tools.c
scipy/sparse/csgraph/_traversal.c
scipy/sparse/csgraph/_flow.c
scipy/sparse/csgraph/_matching.c
scipy/sparse/csgraph/_reordering.c
scipy/sparse/linalg/dsolve/umfpack/_umfpack.py
scipy/sparse/linalg/dsolve/umfpack/_umfpack_wrap.c
scipy/sparse/linalg/eigen/arpack/_arpack-f2pywrappers.f
scipy/sparse/linalg/eigen/arpack/_arpackmodule.c
scipy/sparse/linalg/eigen/arpack/arpack.pyf
scipy/sparse/linalg/isolve/iterative/BiCGREVCOM.f
scipy/sparse/linalg/isolve/iterative/BiCGSTABREVCOM.f
scipy/sparse/linalg/isolve/iterative/CGREVCOM.f
scipy/sparse/linalg/isolve/iterative/CGSREVCOM.f
scipy/sparse/linalg/isolve/iterative/GMRESREVCOM.f
scipy/sparse/linalg/isolve/iterative/QMRREVCOM.f
scipy/sparse/linalg/isolve/iterative/STOPTEST2.f
scipy/sparse/linalg/isolve/iterative/_iterative.pyf
scipy/sparse/linalg/isolve/iterative/_iterativemodule.c
scipy/sparse/linalg/isolve/iterative/getbreak.f
scipy/sparse/sparsetools/bsr_impl.h
scipy/sparse/sparsetools/csc_impl.h
scipy/sparse/sparsetools/csr_impl.h
scipy/sparse/sparsetools/other_impl.h
scipy/sparse/sparsetools/sparsetools_impl.h
scipy/spatial/_ckdtree.cxx
scipy/spatial/ckdtree.cxx
scipy/spatial/ckdtree.h
scipy/spatial/_hausdorff.c
scipy/spatial/_qhull.c
scipy/spatial/qhull.c
scipy/spatial/_voronoi.c
scipy/spatial/transform/_rotation.c
scipy/special/_comb.c
scipy/special/_ellip_harm_2.c
scipy/special/_ellip_harm_2.h
scipy/special/_logit.c
scipy/special/_test_internal.c
scipy/special/_test_round.c
scipy/special/_ufuncs.c
scipy/special/_ufuncs.h
scipy/special/_ufuncs.pyx
@ -272,27 +240,16 @@ scipy/special/_ufuncs_cxx_defs.h
scipy/special/_ufuncs_defs.h
scipy/special/cython_special.c
scipy/special/cython_special.h
scipy/special/_specfunmodule.c
scipy/special/cython_special.pxd
scipy/special/cython_special.pyx
scipy/special/specfunmodule.c
scipy/special/tests/data/*.npz
scipy/special/ellint_carlson_cpp_lite/Makefile
scipy/special/ellint_carlson_cpp_lite/cellint.*
scipy/special/ellint_carlson_cpp_lite/tests
scipy/stats/_rank.c
scipy/stats/_mvnmodule.c
scipy/stats/_statlibmodule.c
scipy/stats/mvn-f2pywrappers.f
scipy/stats/mvnmodule.c
scipy/stats/statlibmodule.c
scipy/stats/vonmises_cython.c
scipy/stats/_stats.c
scipy/stats/_levy_stable/levyst.c
scipy/stats/_biasedurn.cxx
scipy/stats/biasedurn.cxx
scipy/stats/_sobol.c
scipy/stats/_qmc_cy.cxx
scipy/stats/_hypotests_pythran.cpp
scipy/stats/_unuran/unuran_wrapper.c
scipy/stats/_rcont/rcont.c
scipy/stats/_stats_pythran.cpp
scipy/version.py
scipy/special/_exprel.c
scipy/optimize/_group_columns.c
scipy/optimize/cython_optimize/_zeros.c
scipy/optimize/cython_optimize/_zeros.pyx

35
.gitmodules vendored
View File

@ -1,32 +1,9 @@
[submodule "doc/scipy-sphinx-theme"]
path = doc/scipy-sphinx-theme
url = https://github.com/scipy/scipy-sphinx-theme.git
[submodule "doc/sphinxext"]
path = doc/sphinxext
url = https://github.com/numpy/numpydoc.git
[submodule "doc/source/_static/scipy-mathjax"]
path = doc/source/_static/scipy-mathjax
url = https://github.com/scipy/scipy-mathjax.git
[submodule "scipy/_lib/unuran"]
path = scipy/_lib/unuran
url = https://github.com/scipy/unuran.git
[submodule "scipy/_lib/array_api_compat"]
path = scipy/_lib/array_api_compat
url = https://github.com/data-apis/array-api-compat.git
[submodule "scipy/_lib/pocketfft"]
path = scipy/_lib/pocketfft
url = https://github.com/scipy/pocketfft
[submodule "scipy/_lib/cobyqa"]
path = scipy/_lib/cobyqa
url = https://github.com/cobyqa/cobyqa.git
[submodule "scipy/_lib/array_api_extra"]
path = scipy/_lib/array_api_extra
url = https://github.com/data-apis/array-api-extra.git
# All submodules used as a Meson `subproject` are required to be under the
# subprojects/ directory - see:
# https://mesonbuild.com/Subprojects.html#why-must-all-subprojects-be-inside-a-single-directory
[submodule "subprojects/highs"]
path = subprojects/highs
url = https://github.com/scipy/HiGHs
[submodule "subprojects/boost_math/math"]
path = subprojects/boost_math/math
url = https://github.com/boostorg/math.git
[submodule "subprojects/xsf"]
path = subprojects/xsf
url = https://github.com/scipy/xsf.git

332
.mailmap
View File

@ -10,43 +10,14 @@
# This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u
# does not list the same person multiple times.
#
@awakenting <awakenting@users.noreply.github.com> awakenting <awakenting@users.noreply.github.com>
@axiru <axiru@users.noreply.gihub.com> axiru <axiru@users.noreply.gihub.com>
@cel4 <cel4@users.noreply.github.com> cel4 <cel4@users.noreply.github.com>
@chemelnucfin <aw@sp3.com> aw <aw@sp3.com>
@endolith <endolith@gmail.com> Endolith <endolith@gmail.com>
@endolith <endolith@gmail.com> endolith <endolith@gmail.com>
@FormerPhysicist <Former@physicist.net> FormerPhysicist <Former@physicist.net>
@gaulinmp <gaulinmp+git@gmail.com> Mac <gaulinmp+git@gmail.com>
@h-vetinari <h.vetinari@gmx.com> h-vetinari <h.vetinari@gmx.com>
@h-vetinari <h.vetinari@gmx.com> H. Vetinari <h.vetinari@gmx.com>
@ksemb <ksms@gmx.de> ksemb <ksms@gmx.de>
@kshitij12345 <kshitijkalambarkar@gmail.com> kshitij12345 <kshitijkalambarkar@gmail.com>
@luzpaz <luzpaz@users.noreply.github.com> Unknown <kunda@scribus.net>
@luzpaz <luzpaz@users.noreply.github.com> luz.paz <luzpaz@users.noreply.github.com>
@luzpaz <luzpaz@users.noreply.github.com> luzpaz <luzpaz@users.noreply.github.com>
@mamrehn <mamrehn@users.noreply.github.com> mamrehn <mamrehn@users.noreply.github.com>
@NKrvavica <35114994+NKrvavica@users.noreply.github.com> NKrvavica <35114994+NKrvavica@users.noreply.github.com>
@rafalalgo <rafal.byczek@student.uj.edu.pl> rafalalgo <rafal.byczek@student.uj.edu.pl>
@samyak0210 <30871632+samyak0210@users.noreply.github.com> samyak0210 <30871632+samyak0210@users.noreply.github.com>
@soluwalana <soluwalana@gmail.com> Sam O <soluwalana@gmail.com>
@sudheerachary <sudheerachary115@gmail.com> sudheer <sudheerachary115@gmail.com>
@Tokixix <mateusztok@gmail.com> Mateusz <mateusztok@gmail.com>
@Tokixix <mateusztok@gmail.com> Tokixix <mateusztok@gmail.com>
@tttthomasssss <th0mas.ko6er@gmail.com> Thomas <th0mas.ko6er@gmail.com>
@vkk800 <vkk800@users.noreply.github.com> vkk800 <vkk800@users.noreply.github.com>
@wirew0rm <wirew0rm@users.noreply.github.com> wirew0rm <wirew0rm@users.noreply.github.com>
@xoviat <xoviat@users.noreply.github.com> xoviat <xoviat@users.noreply.github.com>
@yanxun827 <yanxun827@gmail.com> yanxun827 <yanxun827@gmail.com>
@ybeltukov <ybeltukov@gmail.com> ybeltukov <ybeltukov@gmail.com>
@ziejcow <jan.gwinner@gmail.com>ziejcow <jan.gwinner@gmail.com>
Abdullah Fayed <69167506+abdullahfayed6@users.noreply.github.com> Abdullah Sabri Fayed <69167506+abdullahfayed6@users.noreply.github.com>
ADmitri <admitri42@gmail.com> ADmitri <ADmitri42@gmail.com>
Aditya Karumanchi <karumanchi.1@osu.edu> AdityaKarumanchi <karumanchi.1@osu.edu>
Aditya Vijaykumar <vijaykumar.aditya@gmail.com> adivijaykumar <vijaykumar.aditya@gmail.com>
Adrian Raso <adrraso@ucm.es> AdrianRasoOnGit <adrraso@ucm.es>
Akash Goel <goelakas@amazon.com> Goel <goelakas@amazon.com>
Albert Steppi <albert_steppi@hms.harvard.edu> steppi <albert_steppi@hms.harvard.edu>
Aldrian Obaja <> ubuntu <>
Alex Griffing <argriffi@ncsu.edu> alex <argriffi@ncsu.edu>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@gmail.com>
@ -56,397 +27,177 @@ Aman Pratik <amanpratik10@gmail.com> amanp10 <amanpratik10@gmail.com>
Aman Singh <bewithaman@outlook.com> bewithaman <bewithaman@outlook.com>
Aman Thakral <aman.thakral@gmail.com> aman-thakral <aman.thakral@gmail.com>
Amato Kasahara <thisisdummy@example.com> kshramt <thisisdummy@example.com>
Anany Shrey Jain <31594632+ananyashreyjain@users.noreply.github.com> ananyashreyjain <31594632+ananyashreyjain@users.noreply.github.com>
Anders Bech Borchersen <anb@es.aau.dk> andbo <anb@es.aau.dk>
Anirudh Dagar <anirudhdagar6@gmail.com> Anirudh <anirudhdagar6@gmail.com>
Andreas Hilboll <andreas@hilboll.de> Andreas H <andreas@hilboll.de>
Andreas Hilboll <andreas@hilboll.de> Andreas Hilboll <andreas-h@users.noreply.github.com>
Anreas Weh <andreas.weh@web.de> DerWeh <andreas.weh@web.de>
Andreea Georgescu <ageorgescu@ucla.edu> Andreea_G <ageorgescu@ucla.edu>
Andriy Gelman <andriy.gelman@gmail.com> talih0 <andriy.gelman@gmail.com>
Andrew Fowlie <andrew.j.fowlie@googlemail.com> andrew <andrew.j.fowlie@googlemail.com>
Andrew Fowlie <andrew.j.fowlie@googlemail.com> Andrew Fowlie <andrewfowlie@users.noreply.github.com>
Andrew Knyazev <42650045+lobpcg@users.noreply.github.com> lobpcg <42650045+lobpcg@users.noreply.github.com>
Andrew Knyazev <42650045+lobpcg@users.noreply.github.com> Andrew Knyazev <andrew.knyazev@ucdenver.edu>
Andrew Knyazev <42650045+lobpcg@users.noreply.github.com> Andrew Knyazev, Professor Emeritus <andrew.knyazev@ucdenver.edu>
Andrew Nelson <andyfaff@gmail.com> Andrew Nelson <andrew@Andrews-MacBook-Pro.local>
Andrew Nelson <andyfaff@gmail.com> Andrew Nelson <anz@d121131.ncnr.nist.gov>
Andrew Sczesnak <andrewscz@gmail.com> polyatail <andrewscz@gmail.com>
Angeline G. Burrell <angeline.burrell@nrl.navy.mil> Angeline Burrell <aburrell@users.noreply.github.com>
Angeline G. Burrell <angeline.burrell@nrl.navy.mil> Angeline Burrell <angeline.burrell@nrl.navy.mil>
Anne Archibald <peridot.faceted@gmail.com> aarchiba <peridot.faceted@gmail.com>
Anne Archibald <peridot.faceted@gmail.com> Anne Archibald <archibald@astron.nl>
Antonio Horta Ribeiro <antonior92@gmail.com> antonio <antonior92@gmail.com>
Antonio Horta Ribeiro <antonior92@gmail.com> Antonio H Ribeiro <antonior92@gmail.com>
Anushka Suyal <126159239+anushkasuyal@users.noreply.github.com> anushkasuyal <anushkasuyal@hotmail.com>
Ariel Rokem <arokem@gmail.com> ariel.rokem <ariel.rokem@localhost>
Arnaud Baguet <107650207+quantresearch1@users.noreply.github.com> quantresearch1 <107650207+quantresearch1@users.noreply.github.com>
Arno Marty <arno.marty@etu.u-bordeaux.fr> korneix <arno.marty@etu.u-bordeaux.fr>
Arno Onken <arno.onken@iit.it> Arno Onken <asnelt@users.noreply.github.com>
Arthur Volant <arthurvolant@gmail.com> Arthur <37664438+V0lantis@users.noreply.github.com>
Ashwin Pathak <ashwinpathak20nov1996@gmail.com> ashwinpathak20 <ashwinpathak20nov1996@gmail.com>
Ashwin Pathak <ashwinpathak20nov1996@gmail.com> ashwinpathak20nov1996 <ashwinpathak20nov1996@gmail.com>
Ataf Fazledin Ahamed <rabidahamed@gmail.com> fazledyn <rabidahamed@gmail.com>
Atsushi Sakai <asakai.amsl+github@gmail.com> Atsushi Sakai <example@co.jp>
Aviv Yaish <aviv.yaish@mail.huji.ac.il> Aviv <aviv.yaish@mail.huji.ac.il>
Balint Pato <balintp@google.com> balopat <balintp@google.com>
Behzad Nouri <behzadnouri@gmail.com> behzad nouri <behzadnouri@gmail.com>
Ben Beasley <code@musicinmybrain.net> Benjamin A. Beasley <code@musicinmybrain.net>
Benjamin Root <> weathergod <>
Benjamin Santos <caos21@users.noreply.github.com> Benjamin <caos21@users.noreply.github.com>
Benny Malengier <benny.malengier@gmail.com> Benny <benny.malengier@gmail.com>
Benoît Wygas <97663334+bewygs@users.noreply.github.com> bewygs <97663334+bewygs@users.noreply.github.com>
Berkay Antmen <berkay.antmen@shopify.com> bantmen <berkay.antmen@shopify.com>
Bharat Raghunathan <bharatraghunthan9767@gmail.com> Bharat123rox <bharatraghunthan9767@gmail.com>
Bharat Raghunathan <bharatraghunthan9767@gmail.com> Bharat123Rox <bharatraghunthan9767@gmail.com>
Bhavika Tekwani <bhavicka.7992@gmail.com> bhavikat <bhavicka.7992@gmail.com>
Blair Azzopardi <blairuk@gmail.com> bsdz <blairuk@gmail.com>
Blair Azzopardi <blairuk@gmail.com> Blair Azzopardi <bsdz@users.noreply.github.com>
Boyu Liu <114795525+qqwqqw689@users.noreply.github.com> qqwqqw689 <114795525+qqwqqw689@users.noreply.github.com>
Brandon David <brandon.david@zoho.com> brandondavid <brandon.david@zoho.com>
Brett Graham <brettgraham@gmail.com> Brett <brettgraham@gmail.com>
Brett R. Murphy <bmurphy@enthought.com> brettrmurphy <bmurphy@enthought.com>
Brian Hawthorne <brian.hawthorne@localhost> brian.hawthorne <brian.hawthorne@localhost>
Brian Newsom <brian.newsom@colorado.edu> Brian Newsom <Brian.Newsom@Colorado.edu>
Caio Agiani <agianicaio@gmail.com> caioagiani <agianicaio@gmail.com>
Callum Jacob Hays <callumjhays@gmail.com> callumJHays <callumjhays@gmail.com>
Carlos Ramos Carreño <vnmabus@gmail.com> vnmabus <vnmabus@gmail.com>
Charles Jekel <cjekel@gmail.com> cjekel <cjekel@gmail.com>
Charles Masson <charles.masson@datadoghq.com> charlesmasson <charles.masson@datadoghq.com>
Chelsea Liu <chelsea.liu@datadoghq.com> Chelsea <chelsea.liu@datadoghq.com>
Chelsea Liu <chelsea.liu@datadoghq.com> chelsea.l <chelsea.liu@datadoghq.com>
Chris Burns <chris.burns@localhost> chris.burns <chris.burns@localhost>
Christoph Hohnerlein <mail@chohner.com> chohner <mail@chohner.com>
Chris Lasher <> gotgenes <>
Christian Clauss <cclauss@me.com> cclauss <cclauss@me.com>
Christoph Baumgarten <christoph.baumgarten@gmail.com> chrisb83 <33071866+chrisb83@users.noreply.github.com>
Christoph Baumgarten <christoph.baumgarten@gmail.com> chrisb83 <christoph.baumgarten@gmail.com>
Christoph Baumgarten <christoph.baumgarten@gmail.com> Christoph Baumgarten <33071866+chrisb83@users.noreply.github.com>
Christoph Baumgarten <christoph.baumgarten@gmail.com> baumgarc <christoph.baumgarten@gmail.com>
Christoph Gohlke <cgohlke@uci.edu> cgohlke <cgohlke@uci.edu>
Christoph Gohlke <cgohlke@uci.edu> Christolph Gohlke <>
Christoph Gohlke <cgohlke@uci.edu> cgholke <>
Christoph Gohlke <cgohlke@uci.edu> cgohlke <cjgohlke@gmail.com>
Christoph Gohlke <cgohlke@uci.edu> Christoph Gohlke <cjgohlke@gmail.com>
Christopher Kuster <ckuster@carrollu.edu> ckuster <ckuster@carrollu.edu>
CJ Carey <perimosocordiae@gmail.com> CJ Carey <cjcarey@google.com>
Clemens Novak <clemens@familie-novak.net> cnovak <clemens@familie-novak.net>
Clemens Novak <clemens@familie-novak.net> Clemens <clemens@familie-novak.net>
Clemens Schmid <5190547+clemisch@users.noreply.github.com> clemisch <5190547+clemisch@users.noreply.github.com>
Collin RM Stocks <> Collin Stocks <>
Collin Tokheim <collintokheim@gmail.com> ctokheim <collintokheim@gmail.com>
Cong Ma <cong.ma@obspm.fr> Cong Ma <cong.ma@uct.ac.za>
Cristrian Batrin <cristianbatrin@gmail.com> ChrisAB <cristianbatrin@gmail.com>
Daan Wynen <black.puppydog@gmx.de> Daan Wynen <black-puppydog@users.noreply.github.com>
Damian Eads <damian.eads@localhost> damian.eads <damian.eads@localhost>
David Ellis <ducksual@gmail.com> davidcellis <ducksual@gmail.com>
Daniel Garcia <daniel.garcia@suse.com> danigm <daniel.garcia@suse.com>
Daniel B. Smith <smith.daniel.br@gmail.com> Daniel Smith <smith.daniel.br@gmail.com>
Daniel B. Smith <smith.daniel.br@gmail.com> Daniel Smith <smithd.daniel.br@gmail.com>
Daniel B. Smith <smith.daniel.br@gmail.com> Daniel Smith <smithd@daniel-laptop.(none)>
Daniel B. Smith <smith.daniel.br@gmail.com> Daniel B. Smith <Daniel.Smith.Br@gmail.com>
Daniel B. Smith <smith.daniel.br@gmail.com> Daniel B. Smith <neuromathdan@gmail.com>
Daniel B. Smith <smith.daniel.br@gmail.com> Daniel <smith.daniel.br@gmail.com>
Daniel Schmitz <danielschmitzsiegen@gmail.com> dschmitz89 <danielschmitzsiegen@gmail.com>
Danilo Augusto <daniloaugusto.ita16@gmail.com> daniloagst <daniloaugusto.ita16@gmail.com>
Danilo Horta <danilo.horta@gmail.com> Horta <danilo.horta@gmail.com>
David Huard <dhuard@localhost> dhuard <dhuard@localhost>
David Simcha <> dsimcha <>
David M Cooke <> cookedm <>
David Menéndez Hurtado <davidmenhur@gmail.com> Dapid <davidmenhur@gmail.com>
David Menéndez Hurtado <davidmenhur@gmail.com> David Menéndez Hurtado <david.menendez.hurtado@scilifelab.se>
David Menéndez Hurtado <davidmenhur@gmail.com> David Menendez Hurtado <davidmenhur@gmail.com>
David Nicholson <nicholdav@gmail.com> NickleDave <nicholdav@gmail.com>
David Warde-Farley <wardefar@iro.umontreal.ca> david.warde-farley <david.warde-farley@localhost>
Deepak Kumar Gouda <deepakgouda1729@gmail.com> deepakgouda <deepakgouda1729@gmail.com>
Denis Laxalde <denis@laxalde.org> Denis Laxalde <denis.laxalde@logilab.fr>
Denis Laxalde <denis@laxalde.org> Denis Laxalde <denis.laxalde@mcgill.ca>
Denis Laxalde <denis@laxalde.org> Denis Laxalde <denis@mail.laxalde.org>
Derek Homeier <> Derek Homeir <>
Derek Homeier <> Derek Homier <>
Derrick Chambers <d-chambers@users.noreply.github.com> Derrick <d-chambers@users.noreply.github.com>
Dezmond Goff <goff.dezmond@gmail.com> Dezmond <goff.dezmond@gmail.com>
Diana Sukhoverkhova <diana.suhoverhova@mail.ru> Diana <diana.suhoverhova@mail.ru>
Dieter Werthmüller <dieter@werthmuller.org> Dieter Werthmüller <mail@werthmuller.org>
Dieter Werthmüller <dieter@werthmuller.org> Dieter Werthmüller <prisae@users.noreply.github.com>
Dieter Werthmüller <dieter@werthmuller.org> prisae <dieter@werthmuller.org>
Dima Pasechnik <dimpase@gmail.com> Dima Pasechnik <dima@pasechnik.info>
Dmitrey Kroshko <dmitrey.kroshko@localhost> dmitrey.kroshko <dmitrey.kroshko@localhost>
Domen Gorjup <domen_gorjup@hotmail.com> domengorjup <domen_gorjup@hotmail.com>
Donnie Erb <55961724+derb12@users.noreply.github.com> derb12 <55961724+derb12@users.noreply.github.com>
Dowon Yi <akahard2dj@naver.com> Dowon <akahard2dj@naver.com>
Dávid Bodnár <david.bodnar@st.ovgu.de> bdvd <david.bodnar@st.ovgu.de>
Ed Schofield <edschofield@localhost> edschofield <edschofield@localhost>
Egor Zemlyanoy <egorz734@mail.ru> egorz734 <egorz734@mail.ru>
Egor Zemlyanoy <egorz734@mail.ru> Egorz734 <egorz734@mail.ru>
Egor Zemlyanoy <egorz734@mail.ru> Egor <egorz734@mail.ru>
Ellie Litwack <ellie@PF2WXP6T.ad.bac.work> ellieLitwack <ellie@PF2WXP6T.ad.bac.work>
Eric Larson <larson.eric.d@gmail.com> Eric89GXL <larson.eric.d@gmail.com>
Eric Quintero <eric.antonio.quintero@gmail.com> e-q <eric.antonio.quintero@gmail.com>
Eric Quintero <eric.antonio.quintero@gmail.com> Eric Quintero <e-q@users.noreply.github.com>
Eric Soroos <eric-github@soroos.net> wiredfool <eric-github@soroos.net>
Eric Zitong Zhou <zitongzhou1999@gmail.com> zitongzhoueric <zitongzhou1999@gmail.com>
Étienne Tremblay <45673646+30blay@users.noreply.github.com> 30blay <45673646+30blay@users.noreply.github.com>
Evandro Bernardes <15084103+evbernardes@users.noreply.github.com> evbernardes <evbernardes@gmail.com>
Evandro Bernardes <15084103+evbernardes@users.noreply.github.com> Evandro <15084103+evbernardes@users.noreply.github.com>
Evgeni Burovski <evgeny.burovskiy@gmail.com> Zhenya <evgeni@burovski.me>
Evgeni Burovski <evgeny.burovskiy@gmail.com> Evgeni Burovski <evgeni@burovski.me>
Evan W Jones <60061381+E-W-Jones@users.noreply.github.com> Evan <60061381+E-W-Jones@users.noreply.github.com>
Fabian Pedregosa <fabian@fseoane.net> Fabian Pedregosa <fabian.pedregosa@inria.fr>
Fabian Pedregosa <fabian@fseoane.net> Fabian Pedregosa <pedregosa@google.com>
Fabian Rost <fabian.rost@tu-dresden.de> Fabian Rost <fabrost@pks.mpg.de>
Felix Berkenkamp <befelix@ethz.ch> Felix <befelix@ethz.ch>
Felix Berkenkamp <befelix@ethz.ch> Felix Berkenkamp <fberkenkamp@gmail.com>
Feras Saad <fsaad@cmu.edu> Feras A. Saad <fsaad@cmu.edu>
Frederic Renner <frederic.renner@cern.ch> Fred-Renner <74908835+Fred-Renner@users.noreply.github.com>
Florian Wilhelm <Florian.Wilhelm@gmail.com> Florian Wilhelm <Florian.Wilhelm@blue-yonder.com>
François Boulogne <fboulogne sciunto org> François Boulogne <fboulogne at april dot org>
François Boulogne <fboulogne sciunto org> François Boulogne <fboulogne@sciunto.org>
François Boulogne <fboulogne sciunto org> François Boulogne <devel@sciunto.org>
François Magimel <magimel.francois@gmail.com> François Magimel <francois.magimel@etu.enseeiht.fr>
Franz Forstmayr <forstmayr.franz@gmail.com> FranzForstmayr <franz.forstmayr@rosenberger.com>
Franz Forstmayr <forstmayr.franz@gmail.com> FranzForstmayr <forstmayr.franz@gmail.com>
Franz Forstmayr <forstmayr.franz@gmail.com> Franz Forstmayr <franz.forstmayr@rosenberger.com>
Franz Forstmayr <forstmayr.franz@gmail.com> Franz <forstmayr.franz@gmail.com>
Franziska Horn <cod3licious@users.noreply.github.com> cod3licious <cod3licious@users.noreply.github.com>
Fukumu Tsutsumi <levelfourslv@gmail.com> levelfour <levelfourslv@gmail.com>
G Young <gfyoung17@gmail.com> gfyoung <gfyoung17@gmail.com>
G Young <gfyoung17@gmail.com> gfyoung <gfyoung@mit.edu>
ਗਗਨਦੀਪ ਸਿੰਘ (Gagandeep Singh) <gdp.1807@gmail.com> czgdp1807 <gdp.1807@gmail.com>
ਗਗਨਦੀਪ ਸਿੰਘ (Gagandeep Singh) <gdp.1807@gmail.com> Gagandeep Singh <gdp.1807@gmail.com>
Ganesh Kathiresan <ganesh3597@gmail.com> ganesh-k13 <ganesh3597@gmail.com>
Garrett Reynolds <garrettreynolds5@gmail.com> Garrett-R <garrettreynolds5@gmail.com>
Gaël Varoquaux <gael.varoquaux@normalesup.org> Gael varoquaux <gael.varoquaux@normalesup.org>
Gavin Zhang <zhanggan@cn.ibm.com> GavinZhang <zhanggan@cn.ibm.com>
Gavin Zhang <zhanggan@cn.ibm.com> Gavin Zhang <zheddie@163.com>
Gauthier Berthomieu <72027971+Gautzilla@users.noreply.github.com> Gautzilla <72027971+Gautzilla@users.noreply.github.com>
Gayatri Chakkithara <113033661+redpinecube@users.noreply.github.com> redpinecube <113033661+redpinecube@users.noreply.github.com>
Geordie McBain <gdmcbain@protonmail.com> G. D. McBain <gdmcbain@protonmail.com>
Gang Zhao <zhaog6@lsec.cc.ac.cn> zhaog6 <31978442+zhaog6@users.noreply.github.com>
Gian Marco Messa <gianmarco.messa@gmail.com> messagian <gianmarco.messa@gmail.com>
Gideon Genadi Kogan <41887702+ggkogan@users.noreply.github.com> ggkogan <41887702+ggkogan@users.noreply.github.com>
Gina Helfrich <Dr-G@users.noreply.github.com> Gina <Dr-G@users.noreply.github.com>
Giorgio Patrini <giorgio.patrini@anu.edu.au> giorgiop <giorgio.patrini@anu.edu.au>
Giorgio Patrini <giorgio.patrini@anu.edu.au> giorgiop <giorgio.patrini@nicta.com.au>
Gregory R. Lee <grlee77@gmail.com> Gregory R. Lee <gregory.lee@cchmc.org>
Gregory R. Lee <grlee77@gmail.com> Gregory Lee <grlee77@gmail.com>
Golnaz Irannejad <golnazirannejad@gmail.com> golnazir <golnazirannejad@gmail.com>
Guido Imperiale <crusaderky@gmail.com> crusaderky <crusaderky@gmail.com>
Guillaume Horel <thrasibule@users.noreply.github.com> Thrasibule <thrasibule@users.noreply.github.com>
Guo Fei <<guofei9987@foxmail.com> Guofei <<guofei9987@foxmail.com>
Guus Kamphuis <guuskamphuis@gmail.com> ZoutigeWolf <guuskamphuis@gmail.com>
Hameer Abbasi <einstein.edison@gmail.com> Hameer Abbasi <hameerabbasi@yahoo.com>
Han Genuit <> 87 <>
Han Genuit <> Han <>
Harshal Prakash Patankar <pharshalp@gmail.com> pharshalp <pharshalp@gmail.com>
Hervé Audren <h.audren@gmail.com> Herve Audren <h.audren@gmail.com>
Heshy Roskes <heshyr@gmail.com> <hroskes@jhu.edu>
Heshy Roskes <heshyr@gmail.com> <jroskes1@jhu.edu>
Helder Cesar <heldercro@gmail.com> Helder <heldercro@gmail.com>
Helmut Toplitzer <helmut.toplitzer@ait.ac.at> HelmutAIT <helmut.toplitzer@ait.ac.at>
Henry Lin <hlin117@gmail.com> hlin117 <hlin117@gmail.com>
Hiroki IKEDA <ikeda_hiroki@icloud.com> IKEDA Hiroki <ikeda_hiroki@icloud.com>
Hugo van Kemenade <hugovk@users.noreply.github.com> Hugo <hugovk@users.noreply.github.com>
Huize Wang <huizew@gmail.com> Huize <huizew@gmail.com>
Huize Wang <huizew@gmail.com> Huize Wang <huizew@users.noreply.github.com>
Max Silbiger <hollowaytape@retro-type.com> hollowaytape <hollowaytape@retro-type.com>
Ilan Gold <ilanbassgold@gmail.com> ilan-gold <ilanbassgold@gmail.com>
Ion Elberdin <ionelberdin@gmail.com> Ion <ionelberdin@gmail.com>
Ilhan Polat <ilhanpolat@gmail.com> ilayn <ilhanpolat@gmail.com>
Irvin Probst <irvin.probst@ensta-bretagne.fr> I--P <irvin.probst@ensta-bretagne.fr>
Irwin Zaid <irwin.zaid@gmail.com> izaid <hi@irwinzaid.com>
Jacob Carey <jacobcvt12@gmail.com> Jacob Carey <Jacobcvt12@users.noreply.github.com>
Jacob Ogle <jacob.ogle94@outlook.com> jacobogle <jacob.ogle94@outlook.com>
Jacob Vanderplas <jakevdp@gmail.com> Jake VanderPlas <jakevdp@gmail.com>
Jacob Vanderplas <jakevdp@gmail.com> Jake Vanderplas <jakevdp@gmail.com>
Jacob Vanderplas <jakevdp@gmail.com> Jake Vanderplas <jakevdp@yahoo.com>
Jacob Vanderplas <jakevdp@gmail.com> Jake Vanderplas <vanderplas@astro.washington.edu>
Jacob Vanderplas <jakevdp@gmail.com> Jacob Vanderplas <jakevdp@yahoo.com>
Jacopo Tissino <jacopok@gmail.com> Jacopo <jacopok@gmail.com>
Jaime Fernandez del Rio <jaime.frio@gmail.com> jaimefrio <jaime.frio@gmail.com>
Jaime Fernandez del Rio <jaime.frio@gmail.com> Jaime <jaime.frio@gmail.com>
Jaime Fernandez del Rio <jaime.frio@gmail.com> Jaime Fernandez <jaimefrio@google.com>
Jaime Fernandez del Rio <jaime.frio@gmail.com> Jaime Fernandez del Rio <jaimefrio@google.com>
Jaime Fernandez del Rio <jaime.frio@gmail.com> Jaime Fernandez <jaime.frio@gmail.com>
Jaime Fernandez del Rio <jaime.frio@gmail.com> Jaime Fernandez <jaime@Jaimes-iMac.local>
Jaime Rodríguez-Guerra <jaimergp@users.noreply.github.com> jaimergp <jaimergp@users.noreply.github.com>
Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> Jakob Jakobson <31574479+JakobJakobson@users.noreply.github.com>
Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> jakobjakobson13 <43045863+jakobjakobson13@users.noreply.github.com>
Jakub Dyczek <34447984+JDkuba@users.noreply.github.com> JDkuba <34447984+JDkuba@users.noreply.github.com>
Jaime Fernandez del Rio <jaimefrio@google.com> Jaime Fernandez <jaimefrio@google.com>
James T. Webber <jamestwebber@gmail.com> jamestwebber <jamestwebber@gmail.com>
James T. Webber <jamestwebber@gmail.com> James Webber <jamestwebber@users.noreply.github.com>
James T. Webber <jamestwebber@gmail.com> James Webber <j@meswebber.com>
Jan Möseritz-Schmidt <jaro.schmidt@gmail.com> JaRoSchm <jaro.schmidt@gmail.com>
Jan Schlüter <jan.schlueter@ofai.at> Jan Schlueter <jan.schlueter@ofai.at>
Jan Schlüter <jan.schlueter@ofai.at> Jan Schlüter <github@jan-schlueter.de>
Jan Soedingrekso <jan.soedingrekso@tu-dortmund.de> sudojan <jan.soedingrekso@tu-dortmund.de>
Jan Vleeshouwers <j.m.vleeshouwers@tue.nl> janvle <j.m.vleeshouwers@tue.nl>
Jan Vleeshouwers <j.m.vleeshouwers@tue.nl> Vleeshouwers <j.m.vleeshouwers@tue.nl>
Janani Padmanabhan <jenny.stone125@gmail.com> janani <janani@janani-Vostro-3446.(none)>
Janani Padmanabhan <jenny.stone125@gmail.com> Janani <jenny.stone125@gmail.com>
Janez Demšar <janez.demsar@fri.uni-lj.si> janez <janez.demsar@fri.uni-lj.si>
Janez Demšar <janez.demsar@fri.uni-lj.si> janezd <janez.demsar@fri.uni-lj.si>
Jarrod Millman <jarrod.millman@gmail.com> Jarrod Millman <millman@berkeley.edu>
Jean-François B. <jfbu@free.fr> jfbu <jfbu@free.fr>
Jean-François B. <jfbu@free.fr> Jean-François B <jfbu@free.fr>
Jeff Armstrong <jeff@approximatrix.com> ArmstrongJ <approximatrix@gmail.com>
Jeff Armstrong <jeff@approximatrix.com> Jeff Armstrong <jeff@approximatrix.com>
Jesse Engel <jesse.engel@gmail.com> jesseengel <jesse.engel@gmail.com>
Jesse Livezey <jesse.livezey@gmail.com> Jesse Livezey <jlivezey@lbl.gov>
Jigyasu Krishnan <jigyasu@outlook.in> Jigyasu <jigyasu@outlook.in>
Jin-Guo Liu <cacate0129@gmail.com> GiggleLiu <cacate0129@gmail.com>
J.L. Lanfranchi <jll1062@phys.psu.edu> J. L. Lanfranchi <jllanfranchi@users.noreply.github.com>
J.L. Lanfranchi <jll1062@phys.psu.edu> J.L. Lanfranchi <jllanfranchi@users.noreply.github.com>
Joe Driscoll <32208193+jwd0023@users.noreply.github.com> jwd0023 <jwd0023@auburn.edu>
J.L. Lanfranchi <jllanfranchi@users.noreply.github.com> J. L. Lanfranchi <jllanfranchi@users.noreply.github.com>
Joel Nothman <joel.nothman@gmail.com> jnothman <jnothman@student.usyd.edu.au>
Joel Nothman <joel.nothman@gmail.com> Joel Nothman <jnothman@student.usyd.edu.au>
Johannes Kulick <jkkulick@amazon.de> Johannes Kulick <kulick@hildensia.de>
Johannes Schmitz <johannes.schmitz1@gmail.com> johschmitz <johannes.schmitz1@gmail.com>
Jona Sassenhagen <jona.sassenhagen@gmail.com> jona-sassenhagen <jona.sassenhagen@gmail.com>
Jonas Bosse <jonas.bosse@posteo.de> jonasBoss <jonas.bosse@posteo.de>
Jonathan Conroy <jonathanconroy14@gmail.com> jonathanconroy <jonathanconroy14@gmail.com>
Jonathan Sutton <j.sutton.mail@gmail.com> suttonje <j.sutton.mail@gmail.com>
Jonathan Sutton <j.sutton.mail@gmail.com> SUTTON Jonathan [fcs] <fcs@oil.ornl.gov>
Jonathan Sutton <j.sutton.mail@gmail.com> Jonathan Sutton <fcs@oil.ornl.gov>
Jonathan Sutton <j.sutton.mail@gmail.com> Jonathan Sutton <fcs@dell-hwqwz12>
Jonathan Sutton <j.sutton.mail@gmail.com> Jonathan Sutton <fcs@oil.ornl.gov>
Jonathan Sutton <fcs@oil.ornl.gov> SUTTON Jonathan [fcs] <fcs@oil.ornl.gov>
Jonathan Tammo Siebert <siebertjonathan@aim.com> jotasi <siebertjonathan@aim.com>
Jonathan Taylor <jonathan.taylor@localhost> jonathan.taylor <jonathan.taylor@localhost>
Jordão Bragantini <jordao.bragantini@gmail.com> Jordão Bragantini <jordao.bragantini@czbiohub.org>
Joren Hammudoglu <jhammudoglu@gmail.com> jorenham <jhammudoglu@gmail.com>
Joris Vankerschaver <joris.vankerschaver@gmail.com> Joris Vankerschaver <jvankerschaver@enthought.com>
Joscha Reimer <jor@informatik.uni-kiel.de> jor <jor@informatik.uni-kiel.de>
Josef Perktold <josef.pktd@gmail.com> josef-pktd <josef.pktd@gmail.com>
Josef Perktold <josef.pktd@gmail.com> josef <josef@localhost>
Joseph Albert <jxa357@psu.edu> jcalbert <jxa357@psu.edu>
Joseph Albert <jxa357@psu.edu> <jcacnts@gmail.com>
Joseph Albert <jxa357@psu.edu> <4261275+jcalbert@users.noreply.github.com>
Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov> Mad Physicist <madphysicist@users.noreply.github.com>
Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov> Joseph Fox-Rabinovitz <madphysicist@users.noreply.github.com>
Josh Lawrence <josh.k.lawrence@gmail.com> wa03 <josh.k.lawrence@gmail.com>
Josh Lefler <jlefty94@gmail.com> jlefty <jlefty94@gmail.com>
Josh Wilson <person142@users.noreply.github.com> Josh <person142@users.noreply.github.com>
Joshua Markovic <52184130+joshuamarkovic@users.noreply.github.com> joshuamarkovic <52184130+joshuamarkovic@users.noreply.github.com>
Josue Melka <yoch.melka@gmail.com> yoch <yoch.melka@gmail.com>
Juan M. Bello-Rivas <jmbr@superadditive.com> Juan M. Bello-Rivas <jmbr@users.noreply.github.com>
Juan Nunez-Iglesias <juan.nunez-iglesias@monash.edu> Juan Nunez-Iglesias <juan.n@unimelb.edu.au>
Juan Nunez-Iglesias <juan.nunez-iglesias@monash.edu> Juan Nunez-Iglesias <jni.soma@gmail.com>
Juha Remes <jremes@outlook.com> newman101 <jremes@outlook.com>
Julien Jerphanion <git@jjerphan.xyz> Julien Jerphanion (@jjerphan) <git@jjerphan.xyz>
Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega@gmail.com>
Kai Striega <kaistriega@gmail.com> Kai <kaistriega@gmail.com>
Kai Striega <kaistriega@gmail.com> kai <kaistriega@gmail.com>
Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega+github@gmail.com>
Kai Striega <kaistriega@gmail.com> Kai Striega <kaistriega+github@gmail.com>
Karthik Viswanath Ganti <kganti2@illinois.edu> karthik-ganti-2025 <kganti2@illinois.edu>
Kat Huang <kat@aya.yale.edu> kat <kat@aya.yale.edu>
Kenji S Emerson <psmd.iberutaru@gmail.com> Sparrow <psmd.iberutaru@gmail.com>
Kentaro Yamamoto <38549987+yamaken1343@users.noreply.github.com> yamaken <38549987+yamaken1343@users.noreply.github.com>
Kevin Richard Green <kevin.richard.green@gmail.com> kevinrichardgreen <kevin.richard.green@gmail.com>
Kirill R. <dartvader316-dev@pm.me> dartvader316 <dartvader316-dev@pm.me>
Klaus Sembritzki <klausem@gmail.com> klaus <klausem@gmail.com>
Klesk Chonkin <kleskjr@gmail.com> kleskjr <kleskjr@gmail.com>
Krzysztof Pióro <38890793+krzysztofpioro@users.noreply.github.com> krzysztofpioro <38890793+krzysztofpioro@users.noreply.github.com>
Lam Yuen Hei <lamyuenhei@gmail.com> Hei <lamyuenhei@gmail.com>
Lars Buitinck <larsmans@gmail.com> Lars <larsmans@users.noreply.github.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <larsmans@users.noreply.github.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <L.J.Buitinck@uva.nl>
Lars G <lagru@mailbox.org> Lars G <lagru@users.noreply.github.com>
Lars G <lagru@mailbox.org> Lars Grueter <lagru@mailbox.org>
Lars G <lagru@mailbox.org> Lars Grüter <lagru@users.noreply.github.com>
Laurynas Mikšys <lmiksys@gmail.com> Laurynas <lmiksys@gmail.com>
Lei Ma <emptymalei@qq.com> OctoMiao <emptymalei@qq.com>
Levi John Wolf <levi.john.wolf@gmail.com> ljwolf <levi.john.wolf@gmail.com>
Liam Damewood <damewood@physics.ucdavis.edu> ldamewood <damewood@physics.ucdavis.edu>
Liming Wang <lmwang@gmail.com> lmwang <lmwang@gmail.com>
Lindsey Hiltner <lindsey.hiltner@gmail.com> L. Hiltner <lhilt@users.noreply.github.com>
Lindsey Hiltner <lindsey.hiltner@gmail.com> L Hiltner <lhilt@users.noreply.github.com>
Lijun Wang <szcfweiya@gmail.com> szcf-weiya <szcfweiya@gmail.com>
Lorenzo Luengo <> loluengo <>
Lucas Colley <lucas.colley8@gmail.com> lucascolley <51488791+lucascolley@users.noreply.github.com>
Lucas Roberts <rlucas7@vt.edu> Lucas Roberts <rlucas7@users.noreply.github.com>
Lucía Cheung <cheunglucia@gmail.com> ludcila <cheunglucia@gmail.com>
Luke Zoltan Kelley <lkelley@cfa.harvard.edu> lzkelley <lkelley@cfa.harvard.edu>
Maja Gwozdz <maja.k.gwozdz@gmail.com> mkg33 <maja.k.gwozdz@gmail.com>
Maja Gwozdz <maja.k.gwozdz@gmail.com> Maja Gwóźdź <maja.k.gwozdz@gmail.com>
Mak Sze Chun <makszechun@gmail.com> makbigc <makszechun@gmail.com>
Malayaja Chutani <42006125+malch2@users.noreply.github.com> malch2 <42006125+malch2@users.noreply.github.com>
Malik Idrees Hasan Khan <pencilartassault@hotmail.com> MalikIdreesHasa <pencilartassault@hotmail.com>
Malte Esders <git@maltimore.info> Maltimore <git@maltimore.info>
Mandeep Singh <mandeep.singh@zomato.com> Mandeep Singh <daxlab@users.noreply.github.com>
M.J. Nichol <mjnichol@alumni.uwaterloo.ca> voyager6868 <mjnichol@alumni.uwaterloo.ca>
Maniteja Nandana <manitejanmt@gmail.com> maniteja123 <manitejanmt@gmail.com>
Marc Honnorat <marc.honnorat@gmail.com> honnorat <marc.honnorat@gmail.com>
Marcello Seri <mseri@users.noreply.github.com> mseri <mseri@users.noreply.github.com>
Marco Maggi <124086916+m-maggi@users.noreply.github.com> m-maggi <124086916+m-maggi@users.noreply.github.com>
Mark E Fuller <mark.e.fuller@gmx.de> Mark E. Fuller <mark.e.fuller@gmx.de>
Mark van Rossum <mark.vanrossum@nottingham.ac.uk> vrossum <mark.vanrossum@nottingham.ac.uk>
Mark Wiebe <> Mark <>
Martin Manns <mmanns@gmx.net> manns <mmanns@gmx.net>
Martin Reinecke <martin.reinecke1@gmx.de> mreineck <martin.reinecke1@gmx.de>
Marvin Kastner <1kastner@informatik.uni-hamburg.de> 1kastner <1kastner@informatik.uni-hamburg.de>
Matt Haberland <mdhaber@mit.edu> <mhaberla@calpoly.edu>
Matt Haberland <mdhaber@mit.edu> <matthaberland@Matts-MacBook-Pro.local>
Matt Haberland <mdhaber@mit.edu> mdhaber <mdhaber@users.noreply.github.com>
Matt Knox <mattknox.ca> mattknox_ca <mattknox_ca@localhost>
Matteo Visconti <matteo.visconti.gr@dartmouth.edu> Matteo Visconti dOC <matteo.visconti.gr@dartmouth.edu>
Matthew H Flamm <matthewhflamm@gmail.com> Flamm, Matthew H <matthewhflamm@gmail.com>
Matthew H Flamm <matthewhflamm@gmail.com> MatthewFlamm <39341281+MatthewFlamm@users.noreply.github.com>
Matthew H Flamm <matthewhflamm@gmail.com> Matthew Flamm <matthewhflamm@gmail.com>
Matthias Bussonnier <bussonniermatthias@gmail.com> M Bussonnier <bussonniermatthias@gmail.com>
Mathias Zechmeister <32583239+mzechmeister@users.noreply.github.com> mzechmeister <32583239+mzechmeister@users.noreply.github.com>
Matti Picus <matti.picus@gmail.com> mattip <matti.picus@gmail.com>
Max Argus <argus.max@gmail.com> BlGene <argus.max@gmail.com>
Max Argus <argus.max@gmail.com> max argus <argus.max@gmail.com>
Max Bolingbroke <batterseapower@hotmail.com> DSG User <>
Max Bolingbroke <batterseapower@hotmail.com> Max Bolingbroke <Max.Bolingbroke@gsacapital.com>
Melissa Weber Mendonça <melissawm@gmail.com> melissawm <melissawm.github@gmail.com>
Melissa Weber Mendonça <melissawm@gmail.com> Melissa Weber Mendonça <melissawm.github@gmail.com>
Melissa Weber Mendonça <melissawm@gmail.com> Melissa Weber <melissawm.github@gmail.com>
Michael Benfield <mike.benfield@gmail.com> mikebenfield <mike.benfield@gmail.com>
Michael Droettboom <> mdroe <>
Michael Dunphy <Michael.Dunphy@dfo-mpo.gc.ca> Michael Dunphy <mdunphy@users.noreply.github.com>
Michael Hirsch <scienceopen@noreply.github.com> michael <scienceopen@noreply.github.com>
Michael Hirsch <scienceopen@noreply.github.com> Michael Hirsch <scienceopen@users.noreply.github.com>
Michael James Bedford <SunsetOrange@users.noreply.github.com> Michael <SunsetOrange@users.noreply.github.com>
Michael Marien <marien.mich@gmail.com> michaelmarien <marien.mich@gmail.com>
Miguel A. Batalla <miguelangel@batalla.pro> mabatalla <miguelangel@batalla.pro>
Mikhail Pak <mikhail.pak@tum.de> mp4096 <mikhail.pak@tum.de>
Mikhail Ryazanov <mikhail.ryazanov@gmail.com> MikhailRyazanov <mikhail.ryazanov@gmail.com>
Milad Sadeghi DM <EverLookNeverSee@Protonmail.ch> ELNS <57490926+EverLookNeverSee@users.noreply.github.com>
Mugunthan Selvanayagam <mugunthan.selvanayagam@multicorewareinc.com> Mugu~~ <mugunthan.selvanayagam@multicorewareinc.com>
Muhammad Firmansyah Kasim <firman.kasim@gmail.com> mfkasim91 <firman.kasim@gmail.com>
Nathan Bell <wnbell@localhost> wnbell <wnbell@localhost>
Nathan Woods <woodscn@lanl.gov> Charles Nathan Woods <woodscn@pn1504346.lanl.gov>
Nathan Woods <woodscn@lanl.gov> Nathan Woods <charlesnwoods@gmail.com>
Nathan Woods <woodscn@lanl.gov> Nathan Woods <woodscn@pn1504346.lanl.gov>
Neil Girdhar <mistersheik@gmail.com> Neil <mistersheik@gmail.com>
Nicholas McKibben <nicholas.bgp@gmail.com> mckib2 <nicholas.bgp@gmail.com>
Nickolai Belakovski <nbelakovski@users.noreply.github.com> nbelakovski <nbelakovski@users.noreply.github.com>
Nicky van Foreest <vanforeest@gmail.com> Nicky van Foreest <ndvanforeest@users.noreply.github.com>
Nicola Montecchio <nicola.montecchio@gmail.com> nicola montecchio <nicola.montecchio@gmail.com>
Nicolas Bloyet <nicolas.bloyet@gmail.com> theplatypus <nicolas.bloyet@gmail.com>
Nikita Karetnikov <nkaretnikov@quansight.com> Nikita Karetnikov (ニキータ カレートニコフ) <nikita@karetnikov.org>
Nikolai Nowaczyk <mail@nikno.de> Nikolai <mail@nikno.de>
Nikolas Moya <nikolasmoya@gmail.com> nmoya <nikolasmoya@gmail.com>
Nikolay Mayorov <nikolay.mayorov@zoho.com> Nikolay Mayorov <n59_ru@hotmail.com>
Nikolay Mayorov <nikolay.mayorov@zoho.com> Nikolay Mayorov <nmayorov@users.noreply.github.com>
Noel Kippers <n.kippers@catawiki.nl> RothNRK <n.kippers@catawiki.nl>
Noel Kippers <n.kippers@catawiki.nl> Noel Kippers <RothNRK@users.noreply.github.com>
Ole Bialas <38684453+OleBialas@users.noreply.github.com> Ole <38684453+OleBialas@users.noreply.github.com>
Oleksandr Pavlyk <oleksandr.pavlyk@intel.com> Oleksandr Pavlyk <oleksandr-pavlyk@users.noreply.github.com>
Orestis Floros <orestisf1993@gmail.com> Orestis <orestisf1993@gmail.com>
Pablo Winant <pablo.winant@gmail.com> pablo.winant@gmail.com <Pablo Winant>
Pamphile Roy <roy.pamphile@gmail.com> Pamphile ROY <proy@bongfish.com>
Pamphile Roy <roy.pamphile@gmail.com> Pamphile ROY <roy.pamphile@gmail.com>
Pamphile Roy <roy.pamphile@gmail.com> Tupui <23188539+tupui@users.noreply.github.com>
Param Singh <techhero724@gmail.com> PARAM SINGH <techhero724@gmail.com>
Pascal Klein <65159092+pas-calc@users.noreply.github.com> Pascal <65159092+pas-calc@users.noreply.github.com>
Patrick Snape <patricksnape@gmail.com> patricksnape <patricksnape@gmail.com>
Paul Kienzle <pkienzle@gmail.com> Paul Kienzle <pkienzle@nist.gov>
Paul van Mulbregt <pvanmulbregt@users.noreply.github.com> pvanmulbregt <pvanmulbregt@users.noreply.github.com>
Philippe DONNAT <pdonnat@hcmdom.local> pdonnat <46384882+pdonnat@users.noreply.github.com>
Peadar Coyle <peadarcoyle@googlemail.com> springcoil <peadarcoyle@googlemail.com>
Pedro López-Adeva Fernández-Layos <plopezadeva@gmail.com> plafl <plopezadeva@gmail.com>
Pedro López-Adeva Fernández-Layos <plopezadeva@gmail.com> Pedro López-Adeva Fernández-Layos <plafl@users.noreply.github.com>
@@ -455,131 +206,54 @@ Per Brodtkorb <per.andreas.brodtkorb@gmail.com> pab <pab@MP815.ffi.no>
Per Brodtkorb <per.andreas.brodtkorb@gmail.com> Per A Brodtkorb <per.andreas.brodtkorb@gmail.com>
Perry Lee <mclee@aftercollege.com> Perry <mclee@aftercollege.com>
Pete Bunch <pete.bunch@gmail.com> Pete <pete.bunch@gmail.com>
Peter Bell <peterbell10@live.co.uk> peterbell10 <peterbell10@live.co.uk>
Peter Lysakovski <30794408+Lskvk@users.noreply.github.com> Lskvk <30794408+Lskvk@users.noreply.github.com>
Peter Mahler Larsen <pete.mahler.larsen@gmail.com> pmla <pete.mahler.larsen@gmail.com>
Peter Mahler Larsen <pete.mahler.larsen@gmail.com> Peter <peter.mahler.larsen@gmail.com>
Peter Mahler Larsen <pete.mahler.larsen@gmail.com> Peter Larsen <peter.mahler.larsen@gmail.com>
Peter Mahler Larsen <pete.mahler.larsen@gmail.com> pmla <peter.mahler.larsen@gmail.com>
Peyton Murray <peynmurray@gmail.com> pdmurray <peynmurray@gmail.com>
Peyton Murray <peynmurray@gmail.com> Peyton Murray <peytonmurray@gmail.com>
Philip DeBoer <philip.deboer@gmail.com> Philip DeBoer <philip_deboer@scotiacapital.com>
Phillip Weinberg <weinbe58@bu.edu> weinbe58 <weinbe58@bu.edu>
Pierre de Buyl <pdebuyl@pdebuyl.be> Pierre de Buyl <pdebuyl@ulb.ac.be>
Pierre GM <pierregm@localhost> pierregm <pierregm@localhost>
Poom Chiarawongse <eight1911@gmail.com> Poom Chiarawongse <tchiarawongs@gmail.com>
Poom Chiarawongse <eight1911@gmail.com> poom <eight1911@gmail.com>
Pratham Kumar <pratham.kumar@multicorewareinc.com> pratham-mcw <pratham.kumar@multicorewareinc.com>
Quentin Barthélemy <q.barthelemy@gmail.com> qbarthelemy <q.barthelemy@gmail.com>
Radoslaw Guzinski <radoslaw.guzinski@esa.int> radosuav <rmgu@dhi-gras.com>
Radoslaw Guzinski <radoslaw.guzinski@esa.int> radosuav <radoslaw.guzinski@esa.int>
Ralf Gommers <ralf.gommers@gmail.com> rgommers <ralf.gommers@googlemail.com>
Ralf Gommers <ralf.gommers@gmail.com> Ralf Gommers <ralf.gommers@googlemail.com>
Raphael Wettinger <ra@phael.org> raphael <ra@phael.org>
Raphael Wettinger <ra@phael.org> raphaelw <raphael.wettinger@googlemail.com>
Reidar Kind <53039431+reidarkind@users.noreply.github.com> reidarkind <53039431+reidarkind@users.noreply.github.com>
Renee Otten <reneeotten@users.noreply.github.com> reneeotten <reneeotten@users.noreply.github.com>
Reshama Shaikh <reshama.stat@gmail.com> reshamas <reshama.stat@gmail.com>
Richard Strong Bowen <rsbowen@gmail.com> rsbowen <rsbowen@gmail.com>
Richard Strong Bowen <rsbowen@gmail.com> Richard Strong Bowen <richard@geomagical.com>
Richard Gowers <richardjgowers@gmail.com> richardjgowers <richardjgowers@gmail.com>
Rick Paris <rick.paris@mlb.com> rparis <rick.paris@mlb.com>
Rob Falck <robfalck@gmail.com> rob.falck <rob.falck@localhost>
Robert David Grant <rgrant@enthought.com> Robert David Grant <robert.david.grant@gmail.com>
Robert Kern <rkern@enthought.com> Robert Kern <robert.kern@gmail.com>
Robert Uhl <robert.uhl@rwth-aachen.de> Robert Uhl <62612220+robertuhl@users.noreply.github.com>
Roman Mirochnik <roman.mirochnik@hpe.com> mirochni <roman.mirochnik@hpe.com>
Ruikang Sun <srk888666@qq.com> SunRuikang <srk888666@qq.com>
Rupak Das <dr10ru@yahoo.co.in> Rupak <dr10ru@yahoo.co.in>
Ruslan Yevdokymov <evruslan17@gmail.com> Ruslan Yevdokymov <38809160+ruslanye@users.noreply.github.com>
Ryan Gibson <ryan.alexander.gibson@gmail.com> ragibson <ryan.alexander.gibson@gmail.com>
Sagi Ezri <sagi.ezri@gmail.com> sagi-ezri <sagi.ezri@gmail.com>
Sam Lewis <sam.vr.lewis@gmail.com> Sam Lewis <samvrlewis@users.noreply.github.com>
Sam McCormack <sampmccormack@gmail.com> Sam McCormack <TheGreatCabbage@users.noreply.github.com>
Sam Mason <sam@samason.uk> Sam Mason <sam.mason@warwick.ac.uk>
Sam Rosen <7624861+SamGRosen@users.noreply.github.com> SamGRosen <7624861+SamGRosen@users.noreply.github.com>
Samuel Wallan <44255917+swallan@users.noreply.github.com> swallan <44255917+swallan@users.noreply.github.com>
Samuel Wallan <44255917+swallan@users.noreply.github.com> Sam Wallan <44255917+swallan@users.noreply.github.com>
Santi Hernandez <santi-hernandez@hotmail.com> santiher <santi-hernandez@hotmail.com>
Santi Villalba <sdvillal@gmail.com> santi <sdvillal@gmail.com>
Sara Fridovich-Keil <sfk@eecs.berkeley.edu> [Sara Fridovich-Keil] <[sfk@eecs.berkeley.edu]>
Saurabh Agarwal <shourabh.agarwal@gmail.com> saurabhkgpee <shourabh.agarwal@gmail.com>
Scott Sievert <me@scottsievert.com> scottsievert <sieve121@umn.edu>
Scott Sievert <me@scottsievert.com> <stsievert@users.noreply.github.com>
Scott Sievert <me@scottsievert.com> <scott@stsievert.com>
Scott Sievert <me@scottsievert.com> <github@stsievert.com>
Sean Cheah <cheah_sean@yahoo.com> thalassemia <cheah_sean@yahoo.com>
Sebastian Haase <> sebhaase <>
Sebastian Pucilowski <smopucilowski@gmail.com> Sebastian Pucilowski <smopucilowski@users.noreply.github.com>
Sebastian Skoupý <sebastian.skoupy@gmail.com> Sebascn <sebastian.skoupy@gmail.com>
Shivnaren Srinivasan <shivnaren@gmail.com> srinivasan <shivnaren@gmail.com>
Skipper Seabold <jsseabold@gmail.com> skip <skip@localhost>
Shinya SUZUKI <sshinya@bio.titech.ac.jp> Shinya SUZUKI <minasitawakou@gmail.com>
Smit Lunagariya <55887635+Smit-create@users.noreply.github.com> Smit-create <55887635+Smit-create@users.noreply.github.com>
Smit Lunagariya <smitlunagariya.mat18@itbhu.ac.in> Smit-create <smitlunagariya.mat18@itbhu.ac.in>
Sourav Singh <souravsingh@users.noreply.github.com> Sourav Singh <4314261+souravsingh@users.noreply.github.com>
Srikiran <srikiran@dhcp-v233-179.pv.reshsg.uci.edu> sriki18 <sriki18@users.noreply.github.com>
Stefan Endres <stefan.c.endres@gmail.com> stefan-endres <stefan.c.endres@gmail.com>
Stefan Endres <stefan.c.endres@gmail.com> Stefan Endres <Stefan.C.Endres@gmail.com>
Stefan Peterson <stefan.peterson@rubico.com> sjpet <stefan.peterson@rubico.com>
Stefan Peterson <stefan.peterson@rubico.com> Stefan Peterson <sjpet@users.noreply.github.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <sjvdwalt@gmail.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <stefan@sun.ac.za>
Steve Richardson <arichar6@gmail.com> arichar6 <arichar6@gmail.com>
Steven Adams <166521727+hugehope@users.noreply.github.com> hugehope <166521727+hugehope@users.noreply.github.com>
Sturla Molden <sturla@molden.no> sturlamolden <sturla@molden.no>
Sturla Molden <sturla@molden.no> Sturla Molden <sturlamolden@users.noreply.github.com>
Sturla Molden <sturla@molden.no> unknown <sturlamo@PK-FYS-1121C.uio.no>
Sumit Binnani <sumitbinnani.developer@gmail.com> sumitbinnani <sumitbinnani.developer@gmail.com>
Sumit Das <sumitdas1708@gmail.com> Sumit <sumitdas1708@gmail.com>
Sylvain Bellemare <sbellem@gmail.com> Sylvain Bellemare <sylvain.bellemare@ezeep.com>
Sylvain Gubian <sylvain.gubian@pmi.com> Sylvain Gubian <Sylvain.Gubian@pmi.com>
Sytse Knypstra <S.Knypstra@rug.nl> SytseK <S.Knypstra@rug.nl>
Takumasa Nakamura <n.takumasa@gmail.com> Takumasa N <n.takumasa@gmail.com>
Takuya Oshima <oshima@eng.niigata-u.ac.jp> Takuya OSHIMA <oshima@eng.niigata-u.ac.jp>
Terry Jones <terry@fluidinfo.com> terrycojones <terry@fluidinfo.com>
Thibault de Coincy <80053070+ThibaultDECO@users.noreply.github.com> ThibaultDECO <80053070+ThibaultDECO@users.noreply.github.com>
Thomas Duvernay <td75013@hotmail.fr> Patol75 <td75013@hotmail.fr>
Thomas Kluyver <takowl@gmail.com> Thomas Kluyver <thomas@kluyver.me.uk>
Thouis (Ray) Jones <thouis@gmail.com> Thouis (Ray) Jones <thouis@seas.harvard.edu>
Tiago M.D. Pereira <tiagomdp@gmail.com> tiagopereira <tiagomdp@gmail.com>
Tim Cera <tim@cerazone.net> timcera <tim@cerazone.net>
Tim Leslie <tim.leslie@gmail.com> Tim Leslie <timl@breakawayconsulting.com.au>
Tobias Megies <megies@geophysik.uni-muenchen.de> Tobias Megies <megies@users.noreply.github.com>
Tobias Schmidt <royalts@gmail.com> RoyalTS <royalts@gmail.com>
Todd Goodall <beyondmetis@gmail.com> Todd <beyondmetis@gmail.com>
Todd Jennings <toddrjen@gmail.com> Todd <toddrjen@gmail.com>
Tom Adamczewski <tadamczewskipublic@gmail.com> tadamcz <tadamczewskipublic@gmail.com>
Tom Waite <tom.waite@localhost> tom.waite <tom.waite@localhost>
Tom Donoghue <tdonoghue@ucsd.edu> TomDonoghue <tdonoghue@ucsd.edu>
Tomer Sery <tomer.sery@nextsilicon.com> Tomer.Sery <tomer.sery@nextsilicon.com>
Tony S. Yu <tsyu80@gmail.com> tonysyu <tsyu80@gmail.com>
Tony S. Yu <tsyu80@gmail.com> Tony S Yu <tsyu80@gmail.com>
Toshiki Kataoka <tos.lunar@gmail.com> Toshiki Kataoka <kataoka@preferred.jp>
Toshiki Kataoka <tos.lunar@gmail.com> tosh1ki <tosh1ki@yahoo.co.jp>
Travis Oliphant <teoliphant@gmail.com> Travis E. Oliphant <teoliphant@gmail.com>
Travis Oliphant <teoliphant@gmail.com> Travis Oliphant <oliphant@enthought.com>
Uwe Schmitt <uwe.schmitt@localhost> uwe.schmitt <uwe.schmitt@localhost>
Vicky Close <vicky.r.close@gmail.com> vickyclose <vicky.r.close@gmail.com>
Victor PM <vpecanins@gmail.com> vpecanins <vpecanins@gmail.com>
Vladyslav Rachek <wsw.raczek@gmail.com> Vladyslav Rachek <36896640+erheron@users.noreply.github.com>
Warren Weckesser <warren.weckesser@gmail.com> warren.weckesser <warren.weckesser@localhost>
Warren Weckesser <warren.weckesser@gmail.com> Warren Weckesser <warren.weckesser@enthought.com>
Warren Weckesser <warren.weckesser@gmail.com> Warren Weckesser <warren.weckesser@localhost>
Warren Weckesser <warren.weckesser@gmail.com> warren <warren.weckesser@gmail.com>
Wendy Liu <ilostwaldo@gmail.com> dellsystem <ilostwaldo@gmail.com>
WhimsyHippo <hippowiseman789@gmail.com> hippowm <hippowiseman789@gmail.com>
Will Tirone <will.tirone1@gmail.com> WillTirone <42592742+WillTirone@users.noreply.github.com>
Will Tirone <will.tirone1@gmail.com> willtirone <will.tirone1@gmail.com>
Xiao Yuan <yuanx749@gmail.com> yuanx749 <yuanx749@gmail.com>
Xingyu Liu <38244988+charlotte12l@users.noreply.github.com> 刘星雨 <liuxingyu.12@bytedance.com>
Yagiz Olmez <57116432+yagizolmez@users.noreply.github.com> yagizolmez <yagizolmez@pop-os.localdomain>
Yongcai Huang <97007177+YongcaiHuang@users.noreply.github.com> YongcaiHuang <97007177+YongcaiHuang@users.noreply.github.com>
Yu Feng <rainwoodman@gmail.com> Yu Feng <yfeng1@waterfall.dyn.berkeley.edu>
Yuji Ikeda <yuji.ikeda.ac.jp@gmail.com> yuzie007 <yuji.ikeda.ac.jp@gmail.com>
Yves-Rémi Van Eycke <yves-remi@hotmail.com> vanpact <yves-remi@hotmail.com>
Zaikun Zhang <zaikunzhang@gmail.com> zaikunzhang <zaikunzhang@gmail.com>
Zé Vinícius <jvmirca@gmail.com> Ze Vinicius <jvmirca@gmail.com>
Zhenyu Zhu <ajz34@outlook.com> Zhenyu Zhu ajz34 <ajz34@outlook.com>
Zhida Shang <57895730+futuer-szd@users.noreply.github.com> Futuer <57895730+futuer-szd@users.noreply.github.com>
Zoufiné Lauer-Bare <raszoufine@gmail.com> zolabar <raszoufine@gmail.com>

File diff suppressed because it is too large Load Diff

186
.travis.yml Normal file
View File

@@ -0,0 +1,186 @@
# After changing this file, check it on:
# http://lint.travis-ci.org/
language: python
sudo: false
env:
global:
# Wheelhouse for pre-release wheels
- PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com"
matrix:
include:
- python: 2.7
env:
- PYFLAKES=1
- PEP8=1
- NUMPYSPEC=numpy
before_install:
- pip install pycodestyle==2.3.1
- pip install pyflakes==1.1.0
script:
- PYFLAKES_NODOCTEST=1 pyflakes scipy benchmarks/benchmarks | grep -E -v 'unable to detect undefined names|assigned to but never used|imported but unused|redefinition of unused|may be undefined, or defined from star imports' > test.out; cat test.out; test \! -s test.out
- pycodestyle scipy benchmarks/benchmarks
- python: 2.7
env:
- TESTMODE=fast
- COVERAGE=
- NUMPYSPEC="--pre --upgrade --timeout=60 -f $PRE_WHEELS numpy"
- python: 3.6
env:
- TESTMODE=full
- COVERAGE="--coverage --gcov"
- NUMPYSPEC=numpy
- python: 3.5
env:
- TESTMODE=fast
- COVERAGE=
- USE_WHEEL=1
- REFGUIDE_CHECK=1
- python: 3.4
env:
- TESTMODE=fast
- COVERAGE=
- NUMPYSPEC="numpy==1.8.2"
- USE_SDIST=1
- OPTIMIZE=-OO
- os: osx
language: generic
env:
- TESTMODE=fast
- COVERAGE=
- NUMPYSPEC=numpy
- MB_PYTHON_VERSION=3.6
addons:
apt:
packages:
- libatlas-dev
- libatlas-base-dev
- liblapack-dev
- gfortran
- libgmp-dev
- libmpfr-dev
- libsuitesparse-dev
- ccache
- swig
- libmpc-dev
cache:
directories:
- $HOME/.ccache
- $HOME/.cache/pip
- $HOME/Library/Caches/pip
before_install:
# Work in our own virtualenv to isolate from travis-ci packages.
- echo $TRAVIS_OS_NAME
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
free -m
export PATH=/usr/lib/ccache:$PATH
elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew cask uninstall oclint
brew install gcc ccache libmpc
touch config.sh
git clone --depth=1 https://github.com/matthew-brett/multibuild.git
source multibuild/common_utils.sh
source multibuild/travis_steps.sh
before_install
which ccache
export PATH=/usr/local/opt/ccache/libexec:$PATH
export USE_CCACHE=1
export CCACHE_MAXSIZE=200M
export CCACHE_CPP2=1
export CFLAGS="-arch x86_64"
export CXXFLAGS="-arch x86_64"
printenv
fi
- export CCACHE_COMPRESS=1
- python --version # just to check
- export NPY_NUM_BUILD_JOBS=2
- uname -a
- df -h
- ulimit -a
- mkdir builds
- cd builds
- travis_retry pip install --upgrade pip setuptools wheel
- travis_retry pip install cython==0.25.2
- if [ -n "$NUMPYSPEC" ]; then travis_retry pip install $NUMPYSPEC; fi
- travis_retry pip install --upgrade pytest pytest-xdist pytest-faulthandler mpmath argparse Pillow codecov
- travis_retry pip install gmpy2 # speeds up mpmath (scipy.special tests)
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
# optional sparse.linalg dependency (test linux only, no suitesparse installed on osx)
if [ -z "$NUMPYSPEC" ]; then
# numpy must be installed to build scikit-umfpack
travis_retry pip install numpy
fi
travis_retry pip install scikit-umfpack
if [ -z "$NUMPYSPEC" ]; then
# cleanup after ourselves
travis_retry pip uninstall -y numpy
fi
fi
- if [ "${TESTMODE}" == "full" ]; then pip install pytest-cov coverage; fi
- |
if [ "${REFGUIDE_CHECK}" == "1" ]; then
travis_retry pip install matplotlib Sphinx==1.7.2
# XXX: Install older numpy as a workaround for float printing changes.
# XXX: We'll remove this once numpy 1.14.1 is released to fix its printing
# XXX: bugs
travis_retry pip install 'numpy!=1.14.0'
fi
- python -V
- ccache -s
- cd ..
- set -o pipefail
script:
- python -c 'import numpy as np; print("relaxed strides checking:", np.ones((10,1),order="C").flags.f_contiguous)'
# Make sure that relaxed strides checking is actually in effect; otherwise fail loudly
- if [ "$NPY_RELAXED_STRIDES_CHECKING" == "1" ]; then python -c'import numpy as np; assert np.ones((10,1),order="C").flags.f_contiguous'; fi
# Test that mpmath actually uses gmpy2
- python -c 'import mpmath.libmp; assert mpmath.libmp.BACKEND == "gmpy"'
- |
if [ "${USE_WHEEL}" == "1" ]; then
# Run setup.py build before pip wheel, to build in current directory
# and make more efficient use of ccache
echo "setup.py build"
python tools/suppress_output.py python setup.py build
echo "pip wheel"
python tools/suppress_output.py pip wheel --no-build-isolation .
pip install --no-cache-dir scipy*.whl
USE_WHEEL_BUILD="--no-build"
elif [ "${USE_SDIST}" == "1" ]; then
echo "setup.py sdist"
python tools/suppress_output.py python setup.py sdist
# Move out of source directory to avoid finding local scipy
cd dist
# Use pip --build option to make ccache work better.
# However, this option is partially broken
# (see https://github.com/pypa/pip/issues/4242)
# and some shenanigans are needed to make it work.
echo "pip install"
python ../tools/suppress_output.py pip install --no-cache-dir --no-build-isolation --build="$HOME/builds" --upgrade "file://`echo -n $PWD/scipy*`#egg=scipy" -v
cd ..
USE_WHEEL_BUILD="--no-build"
fi
- export SCIPY_AVAILABLE_MEM=3G
- python -u $OPTIMIZE runtests.py -g -m $TESTMODE $COVERAGE $USE_WHEEL_BUILD -- -rfEX -n 3 2>&1 | tee runtests.log
- tools/validate_runtests_log.py $TESTMODE < runtests.log
- if [ "${REFGUIDE_CHECK}" == "1" ]; then python runtests.py -g --refguide-check; fi
# Check dynamic symbol hiding works on Linux
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ./tools/check_pyext_symbol_hiding.sh build; fi
after_script:
- ccache -s
# Upload coverage information
- |
if [ -n "${COVERAGE}" ]; then
RUN_DIR=`echo build/testenv/lib/python*/site-packages`
# Produce gcov output for codecov to find
find build -name '*.gcno' -type f -exec gcov -pb {} +
mv *.gcov "$RUN_DIR/"
# Run codecov
pushd "$RUN_DIR"
codecov -X gcov
popd
fi
notifications:
# Perhaps we should have status emails sent to the mailing list, but
# let's wait to see what people think before turning that on.
email: false

30
BENTO_BUILD.txt Normal file
View File

@ -0,0 +1,30 @@
No-frill version:
* Clone bento::
$ git clone git://github.com/cournape/Bento.git bento
* Bootstrap bento::
$ cd bento && python bootstrap.py
* Clone Waf::
$ git clone https://code.google.com/p/waf/
$ git checkout waf-1.7.13 # waf breaks API regularly, this version works
* Set the WAFDIR environment variable to the base dir of the waf repo you
just created (in your bash_login for example if you're going to build with
Bento often). This is unfortunately needed, Waf is not installable like a
regular Python package::
$ export WAFDIR=ROOT_OF_WAF_REPO
# WAFDIR should be such as $WAFDIR/waflib exists
* Build scipy with Bento::
$ BENTO_ROOT/bentomaker build -j 4 # 4 threads in parallel
# or with progress bar
$ BENTO_ROOT/bentomaker build -p
# or with verbose output
$ BENTO_ROOT/bentomaker build -v

View File

@ -1,24 +0,0 @@
@ARTICLE{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
url = {https://doi.org/10.1038/s41592-019-0686-2},
adsurl = {https://ui.adsabs.harvard.edu/abs/2020NatMe..17..261V},
doi = {10.1038/s41592-019-0686-2},
}

View File

@ -2,24 +2,24 @@
SciPy pull request guidelines
=============================
Pull requests are always welcome, and the SciPy community appreciates
Pull requests are always welcome, and the Scipy community appreciates
any help you give. Note that a code of conduct applies to all spaces
managed by the SciPy project, including issues and pull requests:
https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
https://github.com/scipy/scipy/blob/master/doc/source/dev/conduct/code_of_conduct.rst.
When submitting a pull request, we ask you to check the following:
When submitting a pull request, we ask you check the following:
1. **Unit tests**, **documentation**, and **code style** are in order.
1. **Unit tests**, **documentation**, and **code style** are in order.
For details, please read
https://docs.scipy.org/doc/scipy/dev/hacking.html
https://github.com/scipy/scipy/blob/master/HACKING.rst.txt
It's also OK to submit work in progress if you're unsure of what
this exactly means, in which case you'll likely be asked to make
some further changes.
2. The contributed code will be **licensed under SciPy's license**,
https://github.com/scipy/scipy/blob/main/LICENSE.txt.
https://github.com/scipy/scipy/blob/master/LICENSE.txt
If you did not write the code yourself, you ensure the existing
license is compatible and include the license information in the
contributed files, or obtain permission from the original
contributed files, or obtain a permission from the original
author to relicense the contributed code.

519
HACKING.rst.txt Normal file
View File

@ -0,0 +1,519 @@
=====================
Contributing to SciPy
=====================
This document aims to give an overview of how to contribute to SciPy. It
tries to answer commonly asked questions, and provide some insight into how the
community process works in practice. Readers who are familiar with the SciPy
community and are experienced Python coders may want to jump straight to the
`git workflow`_ documentation.
There are a lot of ways you can contribute:
- Contributing new code
- Fixing bugs and other maintenance work
- Improving the documentation
- Reviewing open pull requests
- Triaging issues
- Working on the `scipy.org`_ website
- Answering questions and participating on the scipy-dev and scipy-user
`mailing lists`_.
Contributing new code
=====================
If you have been working with the scientific Python toolstack for a while, you
probably have some code lying around of which you think "this could be useful
for others too". Perhaps it's a good idea then to contribute it to SciPy or
another open source project. The first question to ask is then, where does
this code belong? That question is hard to answer here, so we start with a
more specific one: *what code is suitable for putting into SciPy?*
Almost all of the new code added to scipy has in common that it's potentially
useful in multiple scientific domains and it fits in the scope of existing
scipy submodules. In principle new submodules can be added too, but this is
far less common. For code that is specific to a single application, there may
be an existing project that can use the code. Some scikits (`scikit-learn`_,
`scikit-image`_, `statsmodels`_, etc.) are good examples here; they have a
narrower focus and because of that more domain-specific code than SciPy.
Now if you have code that you would like to see included in SciPy, how do you
go about it? After checking that your code can be distributed in SciPy under a
compatible license (see FAQ for details), the first step is to discuss on the
scipy-dev mailing list. All new features, as well as changes to existing code,
are discussed and decided on there. You can, and probably should, already
start this discussion before your code is finished.
Assuming the outcome of the discussion on the mailing list is positive and you
have a function or piece of code that does what you need it to do, what next?
Before code is added to SciPy, it at least has to have good documentation, unit
tests and correct code style.
1. Unit tests
In principle you should aim to create unit tests that exercise all the code
that you are adding. This gives some degree of confidence that your code
runs correctly, also on Python versions and hardware or OSes that you don't
have available yourself. An extensive description of how to write unit
tests is given in the NumPy `testing guidelines`_.
2. Documentation
Clear and complete documentation is essential in order for users to be able
to find and understand the code. Documentation for individual functions
and classes -- which includes at least a basic description, type and
meaning of all parameters and returns values, and usage examples in
`doctest`_ format -- is put in docstrings. Those docstrings can be read
within the interpreter, and are compiled into a reference guide in html and
pdf format. Higher-level documentation for key (areas of) functionality is
provided in tutorial format and/or in module docstrings. A guide on how to
write documentation is given in `how to document`_.
3. Code style
Uniformity of style in which code is written is important to others trying
to understand the code. SciPy follows the standard Python guidelines for
code style, `PEP8`_. In order to check that your code conforms to PEP8,
you can use the `pep8 package`_ style checker. Most IDEs and text editors
have settings that can help you follow PEP8, for example by translating
tabs by four spaces. Using `pyflakes`_ to check your code is also a good
idea.
At the end of this document a checklist is given that may help to check if your
code fulfills all requirements for inclusion in SciPy.
Another question you may have is: *where exactly do I put my code*? To answer
this, it is useful to understand how the SciPy public API (application
programming interface) is defined. For most modules the API is two levels
deep, which means your new function should appear as
``scipy.submodule.my_new_func``. ``my_new_func`` can be put in an existing or
new file under ``/scipy/<submodule>/``, its name is added to the ``__all__``
list in that file (which lists all public functions in the file), and those
public functions are then imported in ``/scipy/<submodule>/__init__.py``. Any
private functions/classes should have a leading underscore (``_``) in their
name. A more detailed description of what the public API of SciPy is, is given
in `SciPy API`_.
Once you think your code is ready for inclusion in SciPy, you can send a pull
request (PR) on Github. We won't go into the details of how to work with git
here, this is described well in the `git workflow`_ section of the NumPy
documentation and on the `Github help pages`_. When you send the PR for a new
feature, be sure to also mention this on the scipy-dev mailing list. This can
prompt interested people to help review your PR. Assuming that you already got
positive feedback before on the general idea of your code/feature, the purpose
of the code review is to ensure that the code is correct, efficient and meets
the requirements outlined above. In many cases the code review happens
relatively quickly, but it's possible that it stalls. If you have addressed
all feedback already given, it's perfectly fine to ask on the mailing list
again for review (after a reasonable amount of time, say a couple of weeks, has
passed). Once the review is completed, the PR is merged into the "master"
branch of SciPy.
The above describes the requirements and process for adding code to SciPy. It
doesn't yet answer the question though how decisions are made exactly. The
basic answer is: decisions are made by consensus, by everyone who chooses to
participate in the discussion on the mailing list. This includes developers,
other users and yourself. Aiming for consensus in the discussion is important
-- SciPy is a project by and for the scientific Python community. In those
rare cases that agreement cannot be reached, the maintainers of the module
in question can decide the issue.
Contributing by helping maintain existing code
==============================================
The previous section talked specifically about adding new functionality to
SciPy. A large part of that discussion also applies to maintenance of existing
code. Maintenance means fixing bugs, improving code quality or style,
documenting existing functionality better, adding missing unit tests, keeping
build scripts up-to-date, etc. The SciPy `issue list`_ contains all
reported bugs, build/documentation issues, etc. Fixing issues
helps improve the overall quality of SciPy, and is also a good way
of getting familiar with the project. You may also want to fix a bug because
you ran into it and need the function in question to work correctly.
The discussion on code style and unit testing above applies equally to bug
fixes. It is usually best to start by writing a unit test that shows the
problem, i.e. it should pass but doesn't. Once you have that, you can fix the
code so that the test does pass. That should be enough to send a PR for this
issue. Unlike when adding new code, discussing this on the mailing list may
not be necessary - if the old behavior of the code is clearly incorrect, no one
will object to having it fixed. It may be necessary to add some warning or
deprecation message for the changed behavior. This should be part of the
review process.
Reviewing pull requests
=======================
Reviewing open pull requests (PRs) is very welcome, and a valuable way to help
increase the speed at which the project moves forward. If you have specific
knowledge/experience in a particular area (say "optimization algorithms" or
"special functions") then reviewing PRs in that area is especially valuable -
sometimes PRs with technical code have to wait for a long time to get merged
due to a shortage of appropriate reviewers.
We encourage everyone to get involved in the review process; it's also a
great way to get familiar with the code base. Reviewers should ask
themselves some or all of the following questions:
- Was this change adequately discussed (relevant for new features and changes
in existing behavior)?
- Is the feature scientifically sound? Algorithms may be known to work based on
literature; otherwise, closer look at correctness is valuable.
- Is the intended behavior clear under all conditions (e.g. unexpected inputs
like empty arrays or nan/inf values)?
- Does the code meet the quality, test and documentation expectations outlined
under `Contributing new code`_?
If we do not know you yet, consider introducing yourself.
Other ways to contribute
========================
There are many ways to contribute other than contributing code.
Triaging issues (investigating bug reports for validity and possible actions to
take) is also a useful activity. SciPy has many hundreds of open issues;
closing invalid ones and correctly labeling valid ones (ideally with some first
thoughts in a comment) allows prioritizing maintenance work and finding related
issues easily when working on an existing function or submodule.
Participating in discussions on the scipy-user and scipy-dev `mailing lists`_ is
a contribution in itself. Everyone who writes to those lists with a problem or
an idea would like to get responses, and writing such responses makes the
project and community function better and appear more welcoming.
The `scipy.org`_ website contains a lot of information on both SciPy the
project and SciPy the community, and it can always use a new pair of hands.
The sources for the website live in their own separate repo:
https://github.com/scipy/scipy.org
Recommended development setup
=============================
Since Scipy contains parts written in C, C++, and Fortran that need to be
compiled before use, make sure you have the necessary compilers and Python
development headers installed. Having compiled code also means that importing
Scipy from the development sources needs some additional steps, which are
explained below.
First fork a copy of the main Scipy repository in Github onto your own
account and then create your local repository via::
$ git clone git@github.com:YOURUSERNAME/scipy.git scipy
$ cd scipy
$ git remote add upstream git://github.com/scipy/scipy.git
To build the development version of Scipy and run tests, spawn
interactive shells with the Python import paths properly set up etc.,
do one of::
$ python runtests.py -v
$ python runtests.py -v -s optimize
$ python runtests.py -v -t scipy.special.tests.test_basic::test_xlogy
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
This builds Scipy first, so the first time it may take some time. If
you specify ``-n``, the tests are run against the version of Scipy (if
any) found on current PYTHONPATH. *Note: if you run into a build issue,
more detailed build documentation can be found in :doc:`building/index` and at
https://github.com/scipy/scipy/tree/master/doc/source/building*
Using ``runtests.py`` is the recommended approach to running tests.
There are also a number of alternatives to it, for example in-place
build or installing to a virtualenv. See the FAQ below for details.
Some of the tests in Scipy are very slow and need to be separately
enabled. See the FAQ below for details.
SciPy structure
===============
All SciPy modules should follow the following conventions. In the
following, a *SciPy module* is defined as a Python package, say
``yyy``, that is located in the scipy/ directory.
* Ideally, each SciPy module should be as self-contained as possible.
That is, it should have minimal dependencies on other packages or
modules. Even dependencies on other SciPy modules should be kept to
a minimum. A dependency on NumPy is of course assumed.
* Directory ``yyy/`` contains:
- A file ``setup.py`` that defines
``configuration(parent_package='',top_path=None)`` function
for `numpy.distutils`.
- A directory ``tests/`` that contains files ``test_<name>.py``
corresponding to modules ``yyy/<name>{.py,.so,/}``.
* Private modules should be prefixed with an underscore ``_``,
for instance ``yyy/_somemodule.py``.
* User-visible functions should have good documentation following
the Numpy documentation style, see `how to document`_
* The ``__init__.py`` of the module should contain the main reference
documentation in its docstring. This is connected to the Sphinx
documentation under ``doc/`` via Sphinx's automodule directive.
The reference documentation should first give a categorized list of
the contents of the module using ``autosummary::`` directives, and
after that explain points essential for understanding the use of the
module.
Tutorial-style documentation with extensive examples should be
separate, and put under ``doc/source/tutorial/``
See the existing Scipy submodules for guidance.
For further details on Numpy distutils, see:
https://github.com/numpy/numpy/blob/master/doc/DISTUTILS.rst.txt
Useful links, FAQ, checklist
============================
Checklist before submitting a PR
--------------------------------
- Are there unit tests with good code coverage?
- Do all public functions have docstrings including examples?
- Is the code style correct (PEP8, pyflakes)
- Is the commit message `formatted correctly`_?
- Is the new functionality tagged with ``.. versionadded:: X.Y.Z`` (with
X.Y.Z the version number of the next release - can be found in setup.py)?
- Is the new functionality mentioned in the release notes of the next
release?
- Is the new functionality added to the reference guide?
- In case of larger additions, is there a tutorial or more extensive
module-level description?
- In case compiled code is added, is it integrated correctly via setup.py
(and preferably also Bento configuration files - bento.info and bscript)?
- If you are a first-time contributor, did you add yourself to THANKS.txt?
Please note that this is perfectly normal and desirable - the aim is to
give every single contributor credit, and if you don't add yourself it's
simply extra work for the reviewer (or worse, the reviewer may forget).
- Did you check that the code can be distributed under a BSD license?
Useful SciPy documents
----------------------
- The `how to document`_ guidelines
- NumPy/SciPy `testing guidelines`_
- `SciPy API`_
- The `SciPy Roadmap`_
- NumPy/SciPy `git workflow`_
- How to submit a good `bug report`_
FAQ
---
*I based my code on existing Matlab/R/... code I found online, is this OK?*
It depends. SciPy is distributed under a BSD license, so if the code that you
based your code on is also BSD licensed or has a BSD-compatible license (e.g.
MIT, PSF) then it's OK. Code which is GPL or Apache licensed, has no
clear license, requires citation or is free for academic use only can't be
included in SciPy. Therefore if you copied existing code with such a license
or made a direct translation to Python of it, your code can't be included.
If you're unsure, please ask on the scipy-dev mailing list.
*Why is SciPy under the BSD license and not, say, the GPL?*
Like Python, SciPy uses a "permissive" open source license, which allows
proprietary re-use. While this allows companies to use and modify the software
without giving anything back, it is felt that the larger user base results in
more contributions overall, and companies often publish their modifications
anyway, without being required to. See John Hunter's `BSD pitch`_.
*How do I set up a development version of SciPy in parallel to a released
version that I use to do my job/research?*
One simple way to achieve this is to install the released version in
site-packages, by using a binary installer or pip for example, and set
up the development version in a virtualenv. First install
`virtualenv`_ (optionally use `virtualenvwrapper`_), then create your
virtualenv (named scipy-dev here) with::
$ virtualenv scipy-dev
Now, whenever you want to switch to the virtual environment, you can use the
command ``source scipy-dev/bin/activate``, and ``deactivate`` to exit from the
virtual environment and back to your previous shell. With scipy-dev
activated, install first Scipy's dependencies::
$ pip install Numpy pytest Cython
After that, you can install a development version of Scipy, for example via::
$ python setup.py install
The installation goes to the virtual environment.
*How do I set up an in-place build for development*
For development, you can set up an in-place build so that changes made to
``.py`` files have effect without rebuild. First, run::
$ python setup.py build_ext -i
Then you need to point your PYTHONPATH environment variable to this directory.
Some IDEs (Spyder for example) have utilities to manage PYTHONPATH. On Linux
and OSX, you can run the command::
$ export PYTHONPATH=$PWD
and on Windows::
$ set PYTHONPATH=/path/to/scipy
Now editing a Python source file in SciPy allows you to immediately
test and use your changes (in ``.py`` files), by simply restarting the
interpreter.
*Are there any video examples for installing from source, setting up a
development environment, etc...?*
Currently, there are two video demonstrations for Anaconda Python on macOS:
`Anaconda SciPy Dev Part I (macOS)`_ is a four-minute
overview of installing Anaconda, building SciPy from source, and testing
changes made to SciPy from the Spyder IDE.
`Anaconda SciPy Dev Part II (macOS)`_ shows how to use
a virtual environment to easily switch between the "pre-built version" of SciPy
installed with Anaconda and your "source-built version" of SciPy created
according to Part I.
*Are there any video examples of the basic development workflow?*
`SciPy Development Workflow`_ is a five-minute example of fixing a bug and
submitting a pull request. While it's intended as a followup to
`Anaconda SciPy Dev Part I (macOS)`_ and `Anaconda SciPy Dev Part II (macOS)`_,
the process is similar for other development setups.
*Can I use a programming language other than Python to speed up my code?*
Yes. The languages used in SciPy are Python, Cython, C, C++ and Fortran. All
of these have their pros and cons. If Python really doesn't offer enough
performance, one of those languages can be used. Important concerns when
using compiled languages are maintainability and portability. For
maintainability, Cython is clearly preferred over C/C++/Fortran. Cython and C
are more portable than C++/Fortran. A lot of the existing C and Fortran code
in SciPy is older, battle-tested code that was only wrapped in (but not
specifically written for) Python/SciPy. Therefore the basic advice is: use
Cython. If there's specific reasons why C/C++/Fortran should be preferred,
please discuss those reasons first.
*How do I debug code written in C/C++/Fortran inside Scipy?*
The easiest way to do this is to first write a Python script that
invokes the C code whose execution you want to debug. For instance
``mytest.py``::
from scipy.special import hyp2f1
print(hyp2f1(5.0, 1.0, -1.8, 0.95))
Now, you can run::
gdb --args python runtests.py -g --python mytest.py
If you didn't compile with debug symbols enabled before, remove the
``build`` directory first. While in the debugger::
(gdb) break cephes_hyp2f1
(gdb) run
The execution will now stop at the corresponding C function and you
can step through it as usual. Instead of plain ``gdb`` you can of
course use your favourite alternative debugger; run it on the
``python`` binary with arguments ``runtests.py -g --python mytest.py``.
*How do I enable additional tests in Scipy?*
Some of the tests in Scipy's test suite are very slow and not enabled
by default. You can run the full suite via::
$ python runtests.py -g -m full
This invokes the test suite ``import scipy; scipy.test("full")``,
enabling also slow tests.
There is an additional level of very slow tests (several minutes),
which are disabled also in this case. They can be enabled by setting
the environment variable ``SCIPY_XSLOW=1`` before running the test
suite.
.. _scikit-learn: http://scikit-learn.org
.. _scikit-image: http://scikit-image.org/
.. _statsmodels: http://statsmodels.sourceforge.net/
.. _testing guidelines: https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt
.. _formatted correctly: http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message
.. _how to document: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
.. _bug report: http://scipy.org/bug-report.html
.. _PEP8: http://www.python.org/dev/peps/pep-0008/
.. _pep8 package: http://pypi.python.org/pypi/pep8
.. _pyflakes: http://pypi.python.org/pypi/pyflakes
.. _SciPy API: https://docs.scipy.org/doc/scipy/reference/api.html
.. _SciPy Roadmap: https://scipy.github.io/devdocs/roadmap.html
.. _git workflow: http://docs.scipy.org/doc/numpy/dev/gitwash/index.html
.. _Github help pages: https://help.github.com/articles/set-up-git/
.. _issue list: https://github.com/scipy/scipy/issues
.. _Github: https://github.com/scipy/scipy
.. _scipy.org: https://scipy.org/
.. _scipy.github.com: http://scipy.github.com/
.. _scipy.org-new: https://github.com/scipy/scipy.org-new
.. _documentation wiki: https://docs.scipy.org/scipy/Front%20Page/
.. _SciPy Central: http://scipy-central.org/
.. _doctest: http://www.doughellmann.com/PyMOTW/doctest/
.. _virtualenv: http://www.virtualenv.org/
.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/
.. _bsd pitch: http://nipy.sourceforge.net/nipy/stable/faq/johns_bsd_pitch.html
.. _Pytest: https://pytest.org/
.. _mailing lists: https://www.scipy.org/scipylib/mailing-lists.html
.. _Anaconda SciPy Dev Part I (macOS): https://youtu.be/1rPOSNd0ULI
.. _Anaconda SciPy Dev Part II (macOS): https://youtu.be/Faz29u5xIZc
.. _SciPy Development Workflow: https://youtu.be/HgU01gJbzMY

253
INSTALL.rst.txt Normal file
View File

@ -0,0 +1,253 @@
Building and installing SciPy
+++++++++++++++++++++++++++++
See https://www.scipy.org/Installing_SciPy/
.. Contents::
INTRODUCTION
============
It is *strongly* recommended that you use either a complete scientific Python
distribution or binary packages on your platform if they are available, in
particular on Windows and Mac OS X. You should not attempt to build SciPy if
you are not familiar with compiling software from sources.
Recommended distributions are:
- Enthought Canopy (https://www.enthought.com/products/canopy/)
- Anaconda (https://www.anaconda.com)
- Python(x,y) (http://python-xy.github.io/)
- WinPython (https://winpython.github.io/)
The rest of this install documentation summarizes how to build Scipy. Note
that more extensive (and possibly more up-to-date) build instructions are
maintained at http://scipy.org/scipylib/building/index.html
PREREQUISITES
=============
SciPy requires the following software installed for your platform:
1) Python__ 2.7 or >= 3.4
__ http://www.python.org
2) NumPy__ >= 1.8.2
__ http://www.numpy.org/
3) For building from source: setuptools__
__ https://github.com/pypa/setuptools
4) If you want to build the documentation: Sphinx__ >= 1.2.1
__ http://sphinx-doc.org/
5) If you want to build SciPy master or other unreleased version from source
(Cython-generated C sources are included in official releases):
Cython__ >= 0.23.4
__ http://cython.org/
Windows
-------
Compilers
~~~~~~~~~
There are two ways to build Scipy on Windows:
1. Use Intel MKL, and Intel compilers or ifort + MSVC. This is what Anaconda
and Enthought Canopy use.
2. Use MSVC + gfortran with OpenBLAS. This is how the SciPy Windows wheels are
built.
Mac OS X
--------
Compilers
~~~~~~~~~
It is recommended to use gcc or clang, both work fine. Gcc is available for
free when installing Xcode, the developer toolsuite on Mac OS X. You also
need a fortran compiler, which is not included with Xcode: you should use a
recent gfortran from an OS X package manager (like Homebrew).
Please do NOT use gfortran from `hpc.sourceforge.net <http://hpc.sourceforge.net>`_,
it is known to generate buggy scipy binaries.
Blas/Lapack
~~~~~~~~~~~
Mac OS X includes the Accelerate framework: it should be detected without any
intervention when building SciPy.
Linux
-----
Most common distributions include all the dependencies. You will need to
install a BLAS/LAPACK (all of ATLAS, OpenBLAS, MKL work fine) including
development headers, as well as development headers for Python itself. Those
are typically packaged as python-dev
INSTALLING SCIPY
================
For the latest information, see the web site:
https://www.scipy.org
Development version from Git
----------------------------
Use the command::
git clone https://github.com/scipy/scipy.git
cd scipy
git clean -xdf
python setup.py install --user
Documentation
-------------
Type::
cd scipy/doc
make html
From tarballs
-------------
Unpack ``SciPy-<version>.tar.gz``, change to the ``SciPy-<version>/``
directory, and run::
pip install . -v --user
This may take several minutes to half an hour depending on the speed of your
computer.
TESTING
=======
To test SciPy after installation (highly recommended), execute in Python
>>> import scipy
>>> scipy.test()
To run the full test suite use
>>> scipy.test('full')
If you are upgrading from an older SciPy release, please test your code for any
deprecation warnings before and after upgrading to avoid surprises:
$ python -Wd my_code_that_shouldnt_break.py
Please note that you must have version 1.0 or later of the Pytest test
framework installed in order to run the tests. More information about Pytest is
available on the website__.
__ https://pytest.org/
COMPILER NOTES
==============
You can specify which Fortran compiler to use by using the following
install command::
python setup.py config_fc --fcompiler=<Vendor> install
To see a valid list of <Vendor> names, run::
python setup.py config_fc --help-fcompiler
IMPORTANT: It is highly recommended that all libraries that scipy uses (e.g.
BLAS and ATLAS libraries) are built with the same Fortran compiler. In most
cases, if you mix compilers, you will not be able to import Scipy at best, have
crashes and random results at worst.
UNINSTALLING
============
When installing with ``python setup.py install`` or a variation on that, you do
not get proper uninstall behavior for an older already installed Scipy version.
In many cases that's not a problem, but if it turns out to be an issue, you
need to manually uninstall it first (remove from e.g. in
``/usr/lib/python3.4/site-packages/scipy`` or
``$HOME/lib/python3.4/site-packages/scipy``).
Alternatively, you can use ``pip install . --user`` instead of ``python
setup.py install --user`` in order to get reliable uninstall behavior.
The downside is that ``pip`` doesn't show you a build log and doesn't support
incremental rebuilds (it copies the whole source tree to a tempdir).
TROUBLESHOOTING
===============
If you experience problems when building/installing/testing SciPy, you
can ask help from scipy-user@python.org or scipy-dev@python.org mailing
lists. Please include the following information in your message:
NOTE: You can generate some of the following information (items 1-5,7)
in one command::
python -c 'from numpy.f2py.diagnose import run; run()'
1) Platform information::
python -c 'import os, sys; print(os.name, sys.platform)'
uname -a
OS, its distribution name and version information
etc.
2) Information about C,C++,Fortran compilers/linkers as reported by
the compilers when requesting their version information, e.g.,
the output of
::
gcc -v
g77 --version
3) Python version::
python -c 'import sys; print(sys.version)'
4) NumPy version::
python -c 'import numpy; print(numpy.__version__)'
5) ATLAS version, the locations of atlas and lapack libraries, building
information if any. If you have ATLAS version 3.3.6 or newer, then
give the output of the last command in
::
cd scipy/Lib/linalg
python setup_atlas_version.py build_ext --inplace --force
python -c 'import atlas_version'
7) The output of the following commands
::
python INSTALLDIR/numpy/distutils/system_info.py
where INSTALLDIR is, for example, /usr/lib/python3.4/site-packages/.
8) Feel free to add any other relevant information.
For example, the full output (both stdout and stderr) of the SciPy
installation command can be very helpful. Since this output can be
rather large, ask before sending it into the mailing list (or
better yet, to one of the developers, if asked).
9) In case of failing to import extension modules, the output of
::
ldd /path/to/ext_module.so
can be useful.

View File

@ -1,30 +1,193 @@
Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
Copyright (c) 2001, 2002 Enthought, Inc.
All rights reserved.
Copyright (c) 2003-2017 SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of Enthought nor the names of the SciPy Developers
may be used to endorse or promote products derived from this software
without specific prior written permission.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SciPy bundles a number of libraries that are compatibly licensed. We list
these here.
Name: Numpydoc
Files: doc/sphinxext/numpydoc/*
License: 2-clause BSD
For details, see doc/sphinxext/LICENSE.txt
Name: scipy-sphinx-theme
Files: doc/scipy-sphinx-theme/*
License: 3-clause BSD, PSF and Apache 2.0
For details, see doc/sphinxext/LICENSE.txt
Name: Six
Files: scipy/_lib/six.py
License: MIT
For details, see the header inside scipy/_lib/six.py
Name: Decorator
Files: scipy/_lib/decorator.py
License: 2-clause BSD
For details, see the header inside scipy/_lib/decorator.py
Name: ID
Files: scipy/linalg/src/id_dist/*
License: 3-clause BSD
For details, see scipy/linalg/src/id_dist/doc/doc.tex
Name: L-BFGS-B
Files: scipy/optimize/lbfgsb/*
License: BSD license
For details, see scipy/optimize/lbfgsb/README
Name: SuperLU
Files: scipy/sparse/linalg/dsolve/SuperLU/*
License: 3-clause BSD
For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt
Name: ARPACK
Files: scipy/sparse/linalg/eigen/arpack/ARPACK/*
License: 3-clause BSD
For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING
Name: Qhull
Files: scipy/spatial/qhull/*
License: Qhull license (BSD-like)
For details, see scipy/spatial/qhull/COPYING.txt
Name: Cephes
Files: scipy/special/cephes/*
License: 3-clause BSD
Distributed under 3-clause BSD license with permission from the author,
see https://lists.debian.org/debian-legal/2004/12/msg00295.html
Cephes Math Library Release 2.8: June, 2000
Copyright 1984, 1995, 2000 by Stephen L. Moshier
This software is derived from the Cephes Math Library and is
incorporated herein by permission of the author.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Name: Faddeeva
Files: scipy/special/Faddeeva.*
License: MIT
Copyright (c) 2012 Massachusetts Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Name: qd
Files: scipy/special/cephes/dd_*.[ch]
License: modified BSD license ("BSD-LBNL-License.doc")
This work was supported by the Director, Office of Science, Division
of Mathematical, Information, and Computational Sciences of the
U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and
DE-AC02-05CH11231.
Copyright (c) 2003-2009, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of
any required approvals from U.S. Dept. of Energy) All rights reserved.
1. Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
(1) Redistributions of source code must retain the copyright
notice, this list of conditions and the following disclaimer.
(2) Redistributions in binary form must reproduce the copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
(3) Neither the name of the University of California, Lawrence
Berkeley National Laboratory, U.S. Dept. of Energy nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3. You are under no obligation whatsoever to provide any bug fixes,
patches, or upgrades to the features, functionality or performance of
the source code ("Enhancements") to anyone; however, if you choose to
make your Enhancements available either publicly, or directly to
Lawrence Berkeley National Laboratory, without imposing a separate
written license agreement for such Enhancements, then you hereby grant
the following license: a non-exclusive, royalty-free perpetual license
to install, use, modify, prepare derivative works, incorporate into
other computer software, distribute, and sublicense such enhancements
or derivative works thereof, in binary and source code form.

View File

@ -1,186 +0,0 @@
----
The SciPy repository and source distributions bundle a number of libraries that
are compatibly licensed. We list these here.
Name: fast_matrix_market
Files: scipy/io/_fast_matrix_market/*
License: BSD-2-Clause
For details, see scipy/io/_fast_matrix_market/LICENSE.txt
Name: pystreambuf
Files: scipy/io/_fast_matrix_market/src/pystreambuf.h
License: BSD-3-Clause
For details, see the header inside scipy/io/_fast_matrix_market/src/pystreambuf.h
Name: fast_float
Files: scipy/io/_fast_matrix_market/fast_matrix_market/dependencies/fast_float/*
License: MIT
For details, see scipy/io/_fast_matrix_market/fast_matrix_market/dependencies/fast_float/LICENSE-MIT
Name: ryu
Files: scipy/io/_fast_matrix_market/fast_matrix_market/dependencies/ryu/*
License: BSL-1.0
For details, see scipy/io/_fast_matrix_market/fast_matrix_market/dependencies/ryu/LICENSE-Boost
Name: L-BFGS-B
Files: scipy/optimize/lbfgsb/[__lbfgsb.h,__lbfgsb.c]
License: BSD-3-Clause
For details, see scipy/optimize/__lbfgsb.c
Name: LAPJVsp
Files: scipy/sparse/csgraph/_matching.pyx
License: BSD-3-Clause
Copyright 1987-, A. Volgenant/Amsterdam School of Economics,
University of Amsterdam
Distributed under 3-clause BSD license with permission from
University of Amsterdam.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Name: SuperLU
Files: scipy/sparse/linalg/_dsolve/SuperLU/*
License: BSD-3-Clause
For details, see scipy/sparse/linalg/_dsolve/SuperLU/License.txt
Name: ARPACK
Files: scipy/sparse/linalg/_eigen/arpack/ARPACK/*
License: BSD-3-Clause
For details, see scipy/sparse/linalg/_eigen/arpack/ARPACK/COPYING
Name: Qhull
Files: subprojects/qhull_r/libqhull_r/*
License: Qhull
For details, see subprojects/qhull_r/libqhull_r/COPYING.txt
Name: xsf
Files: subprojects/xsf/*
License: BSD-3-Clause AND MIT AND BSD-3-Clause-LBNL AND Apache-2.0 WITH LLVM-exception
For details, see subprojects/xsf/LICENSE and
subprojects/xsf/LICENSES_bundled.txt
Name: pypocketfft
Files: scipy/fft/_pocketfft/pypocketfft.cxx
License: BSD-3-Clause
For details, see scipy/fft/_pocketfft/LICENSE.md
Name: uarray
Files: scipy/_lib/_uarray/*
License: BSD-3-Clause
For details, see scipy/_lib/_uarray/LICENSE
Name: ampgo
Files: benchmarks/benchmarks/go_benchmark_functions/*.py
License: MIT
Functions for testing global optimizers, forked from the AMPGO project,
https://code.google.com/archive/p/ampgo
Name: pybind11
Files: no source files are included, however pybind11 binary artifacts are
included with every binary build of SciPy.
License: BSD-3-Clause
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Name: HiGHS
Files: subprojects/highs/*
License: MIT
For details, see subprojects/highs/LICENSE.txt
Name: Boost
Files: subprojects/boost_math/math/*
License: BSL-1.0
For details, see subprojects/boost_math/math/LICENSE
Name: Biasedurn
Files: scipy/stats/biasedurn/*
License: BSD-3-Clause
For details, see scipy/stats/biasedurn/license.txt
Name: UNU.RAN
Files: scipy/_lib/unuran/*
License: BSD-3-Clause
For details, see scipy/_lib/unuran/license.txt
Name: NumPy
Files: scipy/stats/libnpyrandom/[logfactorial.h,logfactorial.c]
License: BSD-3-Clause
For details, see header inside scipy/stats/libnpyrandom/logfactorial.h
and scipy/stats/libnpyrandom/logfactorial.c
Name: array-api-compat
Files: scipy/_lib/array_api_compat/*
License: MIT
For details, see scipy/_lib/array_api_compat/LICENSE
Name: Tempita
Files: scipy/_build_utils/tempita/*
License: MIT
For details, see scipy/_build_utils/tempita/LICENSE.txt
Name: Chebfun
Files: scipy/interpolate/[_bary_rational.py, tests/test_bary_rational.py]
License: BSD-3-Clause
For details, see scipy/interpolate/_bary_rational.py
Name: getLebedevSphere
Files: scipy/integrate/_lebedev.py
License: BSD-2-Clause
For details, see scipy/integrate/_lebedev.py
Name: prima
Files: scipy/_lib/pyprima
License: BSD-3-Clause
For details, see scipy/_lib/pyprima/LICENCE.txt

26
MANIFEST.in Normal file
View File

@ -0,0 +1,26 @@
include MANIFEST.in
include *.txt
# Top-level build scripts
include setup.py bscript bento.info
# All source files
recursive-include scipy *
# All documentation
recursive-include doc *
# Add build and testing tools
include tox.ini pytest.ini
recursive-include tools *
# Cached Cython signatures
include cythonize.dat
# Exclude what we don't want to include
recursive-exclude scipy/linalg/src/id_dist/src *_subr_*.f
prune benchmarks/env
prune benchmarks/results
prune benchmarks/html
prune benchmarks/scipy
prune scipy/special/tests/data/boost
prune scipy/special/tests/data/gsl
prune scipy/special/tests/data/local
prune doc/build
prune doc/source/generated
prune */__pycache__
global-exclude *.pyc *~ *.bak *.swp *.pyo

View File

@ -1,46 +1,31 @@
.. image:: https://raw.githubusercontent.com/scipy/scipy/main/doc/source/_static/logo.svg
:target: https://scipy.org
:width: 110
:height: 110
:align: left
SciPy
=====
.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
:target: https://numfocus.org
.. image:: https://img.shields.io/travis/scipy/scipy/master.svg?label=Travis%20CI
:target: https://travis-ci.org/scipy/scipy/
.. image:: https://img.shields.io/pypi/dm/scipy.svg?label=Pypi%20downloads
:target: https://pypi.org/project/scipy/
.. image:: https://img.shields.io/appveyor/ci/scipy/scipy/master.svg?label=AppVeyor
:target: https://ci.appveyor.com/project/scipy/scipy
.. image:: https://img.shields.io/conda/dn/conda-forge/scipy.svg?label=Conda%20downloads
:target: https://anaconda.org/conda-forge/scipy
.. image:: https://img.shields.io/circleci/project/github/scipy/scipy/master.svg?label=CircleCI
:target: https://circleci.com/gh/scipy/scipy
.. image:: https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg
:target: https://stackoverflow.com/questions/tagged/scipy
.. image:: https://codecov.io/gh/scipy/scipy/branch/master/graph/badge.svg
:target: https://codecov.io/gh/scipy/scipy
.. image:: https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue.svg
:target: https://www.nature.com/articles/s41592-019-0686-2
.. image:: https://insights.linuxfoundation.org/api/badge/health-score?project=scipy
:target: https://insights.linuxfoundation.org/project/scipy
SciPy (pronounced "Sigh Pie") is an open-source software for mathematics,
science, and engineering. It includes modules for statistics, optimization,
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. It includes modules for statistics, optimization,
integration, linear algebra, Fourier transforms, signal and image processing,
ODE solvers, and more.
- **Website:** https://scipy.org
- **Documentation:** https://docs.scipy.org/doc/scipy/
- **Development version of the documentation:** https://scipy.github.io/devdocs
- **SciPy development forum:** https://discuss.scientific-python.org/c/contributor/scipy
- **Stack Overflow:** https://stackoverflow.com/questions/tagged/scipy
- **Source code:** https://github.com/scipy/scipy
- **Contributing:** https://scipy.github.io/devdocs/dev/index.html
- **Website (including documentation):** https://www.scipy.org/
- **Mailing list:** http://scipy.org/scipylib/mailing-lists.html
- **Source:** https://github.com/scipy/scipy
- **Bug reports:** https://github.com/scipy/scipy/issues
- **Code of Conduct:** https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
- **Report a security vulnerability:** https://tidelift.com/docs/security
- **Citing in your work:** https://www.scipy.org/citing-scipy/
SciPy is built to work with
NumPy arrays, and provides many user-friendly and efficient numerical routines,
SciPy depends on NumPy, which provides convenient and fast
N-dimensional array manipulation. SciPy is built to work with
NumPy arrays, and provides many user-friendly and efficient numerical routines
such as routines for numerical integration and optimization. Together, they
run on all popular operating systems, are quick to install, and are free of
charge. NumPy and SciPy are easy to use, but powerful enough to be depended
@ -48,32 +33,17 @@ upon by some of the world's leading scientists and engineers. If you need to
manipulate numbers on a computer and display or publish the results, give
SciPy a try!
For the installation instructions, see `our install
guide <https://scipy.org/install/>`__.
For installation instructions, see ``INSTALL.rst.txt``.
Developer information
---------------------
Call for Contributions
----------------------
If you would like to take part in SciPy development, take a look
at the file ``CONTRIBUTING``.
We appreciate and welcome contributions. Small improvements or fixes are always appreciated; issues labeled as "good
first issue" may be a good starting point. Have a look at `our contributing
guide <https://scipy.github.io/devdocs/dev/index.html>`__.
License information
-------------------
Writing code isn't the only way to contribute to SciPy. You can also:
- review pull requests
- triage issues
- develop tutorials, presentations, and other educational materials
- maintain and improve `our website <https://github.com/scipy/scipy.org>`__
- develop graphic design for our brand assets and promotional materials
- help with outreach and onboard new contributors
- write grant proposals and help with other fundraising efforts
If you're unsure where to start or how your skills fit in, reach out! You can
ask on the `forum <https://discuss.scientific-python.org/c/contributor/scipy>`__
or here, on GitHub, by leaving a comment on a relevant issue that is already
open.
If you are new to contributing to open source, `this
guide <https://opensource.guide/how-to-contribute/>`__ helps explain why, what,
and how to get involved.
See the file ``LICENSE.txt`` for information on the history of this
software, terms & conditions for usage, and a DISCLAIMER OF ALL
WARRANTIES.

204
THANKS.txt Normal file
View File

@ -0,0 +1,204 @@
SciPy is an open source library of routines for science and engineering
using Python. It is a community project sponsored by Enthought, Inc.
SciPy originated with code contributions by Travis Oliphant, Pearu
Peterson, and Eric Jones. Travis Oliphant and Eric Jones each contributed
about half the initial code. Pearu Peterson developed f2py, which is
integral to wrapping the many Fortran libraries used in SciPy.
Since then many people have contributed to SciPy, both in code development,
suggestions, and financial support. Below is a partial list. If you've
been left off, please email the "SciPy Developers List" <scipy-dev@python.org>.
Please add names as needed so that we can keep up with all the contributors.
Kumar Appaiah for Dolph Chebyshev window.
Nathan Bell for sparsetools, help with scipy.sparse and scipy.splinalg.
Robert Cimrman for UMFpack wrapper for sparse matrix module.
David M. Cooke for improvements to system_info, and LBFGSB wrapper.
Aric Hagberg for ARPACK wrappers, help with splinalg.eigen.
Chuck Harris for Zeros package in optimize (1d root-finding algorithms).
Prabhu Ramachandran for improvements to gui_thread.
Robert Kern for improvements to stats and bug-fixes.
Jean-Sebastien Roy for fmin_tnc code which he adapted from Stephen Nash's
original Fortran.
Ed Schofield for Maximum entropy and Monte Carlo modules, help with
sparse matrix module.
Travis Vaught for numerous contributions to annual conference and community
web-site and the initial work on stats module clean up.
Jeff Whitaker for Mac OS X support.
David Cournapeau for bug-fixes, refactoring of fftpack and cluster,
implementing the numscons and Bento build support, building Windows
binaries and adding single precision FFT.
Damian Eads for hierarchical clustering, dendrogram plotting,
distance functions in spatial package, vq documentation.
Anne Archibald for kd-trees and nearest neighbor in scipy.spatial.
Pauli Virtanen for Sphinx documentation generation, online documentation
framework and interpolation bugfixes.
Josef Perktold for major improvements to scipy.stats and its test suite and
fixes and tests to optimize.curve_fit and leastsq.
David Morrill for getting the scoreboard test system up and running.
Louis Luangkesorn for providing multiple tests for the stats module.
Jochen Kupper for the zoom feature in the now-deprecated plt plotting module.
Tiffany Kamm for working on the community web-site.
Mark Koudritsky for maintaining the web-site.
Andrew Straw for help with the web-page, documentation, packaging,
testing and work on the linalg module.
Stefan van der Walt for numerous bug-fixes, testing and documentation.
Jarrod Millman for release management, community coordination, and code
clean up.
Pierre Gerard-Marchant for statistical masked array functionality.
Alan McIntyre for updating SciPy tests to use the new NumPy test framework.
Matthew Brett for work on the Matlab file IO, bug-fixes, and improvements
to the testing framework.
Gary Strangman for the scipy.stats package.
Tiziano Zito for generalized symmetric and hermitian eigenvalue problem
solver.
Chris Burns for bug-fixes.
Per Brodtkorb for improvements to stats distributions.
Neilen Marais for testing and bug-fixing in the ARPACK wrappers.
Johannes Loehnert and Bart Vandereycken for fixes in the linalg
module.
David Huard for improvements to the interpolation interface.
David Warde-Farley for converting the ndimage docs to ReST.
Uwe Schmitt for wrapping non-negative least-squares.
Ondrej Certik for Debian packaging.
Paul Ivanov for porting Numeric-style C code to the new NumPy API.
Ariel Rokem for contributions on percentileofscore fixes and tests.
Yosef Meller for tests in the optimization module.
Ralf Gommers for release management, code clean up and improvements
to doc-string generation.
Bruce Southey for bug-fixes and improvements to scipy.stats.
Ernest Adrogué for the Skellam distribution.
Enzo Michelangeli for a fast kendall tau test.
David Simcha for a fisher exact test.
Warren Weckesser for bug-fixes, cleanups, and several new features.
Fabian Pedregosa for linear algebra bug-fixes, new features and refactoring.
Jake Vanderplas for wrapping ARPACK's generalized and shift-invert modes
and improving its tests.
Collin RM Stocks for wrapping pivoted QR decomposition.
Martin Teichmann for improving scipy.special.ellipk & agm accuracy,
and for linalg.qr_multiply.
Jeff Armstrong for discrete state-space and linear time-invariant functionality
in scipy.signal, and sylvester/riccati/lyapunov solvers in scipy.linalg.
Mark Wiebe for fixing type casting after changes in Numpy.
Andrey Smirnov for improvements to FIR filter design.
Anthony Scopatz for help with code review and merging.
Lars Buitinck for improvements to scipy.sparse and various other modules.
Scott Sinclair for documentation improvements and some bug fixes.
Gael Varoquaux for cleanups in scipy.sparse.
Skipper Seabold for a fix to special.gammainc.
Wes McKinney for a fix to special.gamma.
Thouis (Ray) Jones for bug fixes in ndimage.
Yaroslav Halchenko for a bug fix in ndimage.
Thomas Robitaille for the IDL 'save' reader.
Fazlul Shahriar for fixes to the NetCDF3 I/O.
Chris Jordan-Squire for bug fixes, documentation improvements and
scipy.special.logit & expit.
Christoph Gohlke for many bug fixes and help with Windows specific issues.
Jacob Silterra for cwt-based peak finding in scipy.signal.
Denis Laxalde for the unified interface to minimizers in scipy.optimize.
David Fong for the sparse LSMR solver.
Andreas Hilboll for adding several new interpolation methods.
Andrew Schein for improving the numerical precision of norm.logcdf().
Robert Gantner for improving expm() implementation.
Sebastian Werk for Halley's method in newton().
Bjorn Forsman for contributing signal.bode().
Tony S. Yu for ndimage improvements.
Jonathan J. Helmus for work on ndimage.
Alex Reinhart for documentation improvements.
Patrick Varilly for cKDTree improvements.
Sturla Molden for cKDTree improvements.
Nathan Crock for bug fixes.
Steven G. Johnson for Faddeeva W and erf* implementations.
Lorenzo Luengo for whosmat() in scipy.io.
Eric Moore for orthogonal polynomial recurrences in scipy.special.
Jacob Stevenson for the basinhopping optimization algorithm
Daniel Smith for sparse matrix functionality improvements
Gustav Larsson for a bug fix in convolve2d.
Alex Griffing for expm 2009, expm_multiply, expm_frechet,
trust region optimization methods, and sparse matrix onenormest
implementations, plus bugfixes.
Nils Werner for signal windowing and wavfile-writing improvements.
Kenneth L. Ho for the wrapper around the Interpolative Decomposition code.
Juan Luis Cano for refactorings in lti, sparse docs improvements and some
trivial fixes.
Pawel Chojnacki for simple documentation fixes.
Gert-Ludwig Ingold for contributions to special functions.
Joris Vankerschaver for multivariate Gaussian functionality.
Rob Falck for the SLSQP interface and linprog.
Jörg Dietrich for the k-sample Anderson Darling test.
Blake Griffith for improvements to scipy.sparse.
Andrew Nelson for scipy.optimize.differential_evolution.
Brian Newsom for work on ctypes multivariate integration.
Nathan Woods for work on multivariate integration.
Brianna Laugher for bug fixes.
Johannes Kulick for the Dirichlet distribution.
Bastian Venthur for bug fixes.
Alex Rothberg for stats.combine_pvalues.
Brandon Liu for stats.combine_pvalues.
Clark Fitzgerald for namedtuple outputs in scipy.stats.
Florian Wilhelm for usage of RandomState in scipy.stats distributions.
Robert T. McGibbon for Levinson-Durbin Toeplitz solver, Hessian information
from L-BFGS-B.
Alex Conley for the Exponentially Modified Normal distribution.
Abraham Escalante for contributions to scipy.stats
Johannes Ballé for the generalized normal distribution.
Irvin Probst (ENSTA Bretagne) for pole placement.
Ian Henriksen for Cython wrappers for BLAS and LAPACK
Fukumu Tsutsumi for bug fixes.
J.J. Green for interpolation bug fixes.
François Magimel for documentation improvements.
Josh Levy-Kramer for the log survival function of the hypergeometric distribution
Will Monroe for bug fixes.
Bernardo Sulzbach for bug fixes.
Alexander Grigorevskiy for adding extra LAPACK least-square solvers and
modifying linalg.lstsq function accordingly.
Sam Lewis for enhancements to the basinhopping module.
Tadeusz Pudlik for documentation and vectorizing spherical Bessel functions.
Philip DeBoer for wrapping random SO(N) and adding random O(N) and
correlation matrices in scipy.stats.
Tyler Reddy and Nikolai Nowaczyk for scipy.spatial.SphericalVoronoi
Bill Sacks for fixes to netcdf i/o.
Kolja Glogowski for a bug fix in scipy.special.
Surhud More for enhancing scipy.optimize.curve_fit to accept covariant errors
on data.
Antonio H. Ribeiro for implementing iirnotch, iirpeak functions and
trust-exact and trust-constr optimization methods.
Matt Haberland for the interior point linear programming method and
SciPy development videos.
Ilhan Polat for bug fixes on Riccati solvers.
Sebastiano Vigna for code in the stats package related to Kendall's tau.
John Draper for bug fixes.
Alvaro Sanchez-Gonzalez for axis-dependent modes in multidimensional filters.
Alessandro Pietro Bardelli for improvements to pdist/cdist and to related tests.
Jonathan T. Siebert for bug fixes.
Thomas Keck for adding new scipy.stats distributions used in HEP
David Nicholson for bug fixes in spectral functions.
Roman Feldbauer for improvements in scipy.sparse
Dominic Antonacci for statistics documentation.
David Hagen for the object-oriented ODE solver interface.
Arno Onken for contributions to scipy.stats.
Cathy Douglass for bug fixes in ndimage.
Adam Cox for contributions to scipy.constants.
Charles Masson for the Wasserstein and the Cramér-von Mises statistical
distances.
Felix Lenders for implementing trust-trlib method.
Dezmond Goff for adding optional out parameter to pdist/cdist
Nick R. Papior for allowing a wider choice of solvers
Sean Quinn for the Moyal distribution
Lars Grüter for contributions to peak finding in scipy.signal
Jordan Heemskerk for exposing additional windowing functions in scipy.signal.
Michael Tartre (Two Sigma Investments) for contributions to weighted distance functions.
Institutions
------------
Enthought for providing resources and finances for development of SciPy.
Brigham Young University for providing resources for students to work on SciPy.
Agilent, which gave a generous donation in support of SciPy.
UC Berkeley for providing travel money and hosting numerous sprints.
The University of Stellenbosch for funding the development of
the SciKits portal.
Google Inc. for updating documentation of hypergeometric distribution.
Datadog Inc. for contributions to scipy.stats.
Urthecast Inc. for exposing additional windowing functions in scipy.signal.

194
appveyor.yml Normal file
View File

@ -0,0 +1,194 @@
# As config was originally based on an example by Olivier Grisel. Thanks!
# https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor.yml
clone_depth: 50
# No reason for us to restrict the number of concurrent jobs
max_jobs: 100
cache:
- '%LOCALAPPDATA%\pip\Cache'
environment:
global:
MINGW_32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin
MINGW_64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
OPENBLAS_32: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip
OPENBLAS_64: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win64.zip
NUMPY_HEAD: https://github.com/numpy/numpy.git
NUMPY_BRANCH: master
APPVEYOR_SAVE_CACHE_ON_ERROR: true
APPVEYOR_SKIP_FINALIZE_ON_EXIT: true
TEST_TIMEOUT: 1000
matrix:
- PYTHON: C:\Python36
PYTHON_VERSION: 3.6
PYTHON_ARCH: 32
TEST_MODE: fast
- PYTHON: C:\Python27-x64
PYTHON_VERSION: 2.7
PYTHON_ARCH: 64
TEST_MODE: fast
- PYTHON: C:\Python34-x64
PYTHON_VERSION: 3.4
PYTHON_ARCH: 64
TEST_MODE: fast
- PYTHON: C:\Python36-x64
PYTHON_VERSION: 3.6
PYTHON_ARCH: 64
TEST_MODE: full
- PYTHON: C:\Python27
PYTHON_VERSION: 2.7
PYTHON_ARCH: 32
SKIP_NOTAG: true
TEST_MODE: full
- PYTHON: C:\Python34
PYTHON_VERSION: 3.4
PYTHON_ARCH: 32
SKIP_NOTAG: true
TEST_MODE: full
- PYTHON: C:\Python35-x64
PYTHON_VERSION: 3.5
PYTHON_ARCH: 64
SKIP_NOTAG: true
TEST_MODE: full
- PYTHON: C:\Python35
PYTHON_VERSION: 3.5
PYTHON_ARCH: 32
SKIP_NOTAG: true
TEST_MODE: full
init:
- "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
- "ECHO \"%APPVEYOR_SCHEDULED_BUILD%\""
# If there is a newer build queued for the same PR, cancel this one.
# The AppVeyor 'rollout builds' option is supposed to serve the same
# purpose but it is problematic because it tends to cancel builds pushed
# directly to master instead of just PR builds (or the converse).
# credits: JuliaLang developers.
- ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
Write-Host "There are newer queued builds for this pull request, skipping build."
Exit-AppveyorBuild
}
- ps: |
If (($env:SKIP_NOTAG -eq "true") -and ($env:APPVEYOR_REPO_TAG -ne "true")) {
Write-Host "Skipping build, not at a tag."
Exit-AppveyorBuild
}
install:
- C:\cygwin\bin\du -hs "%LOCALAPPDATA%\pip\Cache"
# Prepend newly installed Python to the PATH of this build (this cannot be
# done from inside the powershell script as it would require to restart
# the parent CMD process).
- SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
# Check that we have the expected version and architecture for Python
- python --version
- >-
%CMD_IN_ENV%
python -c "import sys,platform,struct;
print(sys.platform, platform.machine(), struct.calcsize('P') * 8, )"
# Install the BLAS library
# - install "openblas.lib" to PYTHON\lib
# - install OpenBLAS.dll to MINGW\bin
- ps: |
$PYTHON_ARCH = $env:PYTHON_ARCH
$PYTHON = $env:PYTHON
If ($PYTHON_ARCH -eq 32) {
$OPENBLAS = $env:OPENBLAS_32
} Else {
$OPENBLAS = $env:OPENBLAS_64
}
$clnt = new-object System.Net.WebClient
$file = "$(New-TemporaryFile).zip"
$tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ }
$destination = "$PYTHON\lib\openblas.a"
echo $file
echo $tmpdir
echo $OPENBLAS
$clnt.DownloadFile($OPENBLAS,$file)
Get-FileHash $file | Format-List
Expand-Archive $file $tmpdir
rm $tmpdir\$PYTHON_ARCH\lib\*.dll.a
$lib = ls $tmpdir\$PYTHON_ARCH\lib\*.a | ForEach { ls $_ } | Select-Object -first 1
echo $lib
cp $lib $destination
ls $destination
# Upgrade to the latest pip.
- '%CMD_IN_ENV% python -m pip install -U pip setuptools wheel'
# Install the scipy test dependencies.
- '%CMD_IN_ENV% pip install -U --timeout 5 --retries 2 -r tools/ci/appveyor/requirements.txt'
# Replace numpy distutils with a version that can build with msvc + mingw-gfortran
- ps: |
$NumpyDir = $((python -c 'import os; import numpy; print(os.path.dirname(numpy.__file__))') | Out-String).Trim()
rm -r -Force "$NumpyDir\distutils"
$tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ }
echo $env:NUMPY_HEAD
echo $env:NUMPY_BRANCH
git clone -q --depth=1 -b $env:NUMPY_BRANCH $env:NUMPY_HEAD $tmpdir
mv $tmpdir\numpy\distutils $NumpyDir
build_script:
- ps: |
$PYTHON_ARCH = $env:PYTHON_ARCH
If ($PYTHON_ARCH -eq 32) {
$MINGW = $env:MINGW_32
} Else {
$MINGW = $env:MINGW_64
}
$env:Path += ";$MINGW"
$env:NPY_NUM_BUILD_JOBS = "4"
mkdir dist
pip wheel --no-build-isolation -v -v -v --wheel-dir=dist .
ls dist -r | Foreach-Object {
appveyor PushArtifact $_.FullName
pip install $_.FullName
}
test_script:
- python runtests.py -n -m %TEST_MODE% -- -n6 --timeout=%TEST_TIMEOUT% --junitxml=%cd%\junit-results.xml -rfEX
after_build:
# Remove old or huge cache files to hopefully not exceed the 1GB cache limit.
#
# If the cache limit is reached, the cache will not be updated (or not even
# created in the first run). So this is a trade-off between keeping the cache
# current and having a cache at all.
# NB: This is done only `on_success` since the cache in uploaded only on
# success anyway.
- C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -mtime +360 -delete
- C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -size +10M -delete
- C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -empty -delete
# Show size of cache
- C:\cygwin\bin\du -hs "%LOCALAPPDATA%\pip\Cache"
on_finish:
- ps: |
If (Test-Path .\junit-results.xml) {
(new-object net.webclient).UploadFile(
"https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)",
(Resolve-Path .\junit-results.xml)
)
}
$LastExitCode = 0

View File

@ -4,34 +4,41 @@
SciPy benchmarks
================
Benchmarking SciPy with Airspeed Velocity.
Benchmarking Scipy with Airspeed Velocity.
Usage
-----
Airspeed Velocity manages building and Python environments by itself,
Airspeed Velocity manages building and Python virtualenvs by itself,
unless told otherwise. Some of the benchmarking features in
``spin`` also tell ASV to use the SciPy compiled by
``spin``. To run the benchmarks, you do not need to install a
development version of SciPy to your current Python environment.
``runtests.py`` also tell ASV to use the Scipy compiled by
``runtests.py``. To run the benchmarks, you do not need to install a
development version of Scipy to your current Python environment.
Run a benchmark against currently checked-out SciPy version (don't record the
Run a benchmark against currently checked out Scipy version (don't record the
result)::
spin bench --submodule sparse.Arithmetic
python runtests.py --bench sparse.Arithmetic
Compare change in benchmark results with another branch::
Compare change in benchmark results to another branch::
spin bench --compare main --submodule sparse.Arithmetic
python runtests.py --bench-compare master sparse.Arithmetic
Run ASV commands directly (note, this will not set env vars for ``ccache``
and disabling BLAS/LAPACK multi-threading, as ``spin`` does)::
Run benchmarks against the system-installed SciPy rather than rebuilding::
python runtests.py -n --bench sparse.Arithmetic
Run ASV commands::
cd benchmarks
asv run --skip-existing-commits --steps 10 ALL
asv publish
asv preview
./run.py run --skip-existing-commits --steps 10 ALL
./run.py publish
./run.py preview
The ``run.py`` script sets up some environment variables and does other minor
maintenance jobs for you. The benchmark suite is runnable directly using the
``asv`` command.
More on how to use ``asv`` can be found in `ASV documentation`_
Command-line help is available as usual via ``asv --help`` and
@ -43,19 +50,19 @@ Command-line help is available as usual via ``asv --help`` and
Writing benchmarks
------------------
See `ASV documentation`_ for the basics on how to write benchmarks.
See `ASV documentation`_ for basics on how to write benchmarks.
Some things to consider:
- When importing things from SciPy on the top of the test files, do it as::
- When importing things from Scipy on the top of the test files, do it as::
from .common import safe_import
with safe_import():
try:
from scipy.sparse.linalg import onenormest
except ImportError:
pass
The benchmark files need to be importable also when benchmarking old versions
of SciPy. The benchmarks themselves don't need any guarding against missing
of Scipy. The benchmarks themselves don't need any guarding against missing
features --- only the top-level imports.
- Try to keep the runtime of the benchmark reasonable.
@ -64,29 +71,9 @@ Some things to consider:
time measurements via ``time.clock``, even if it requires some juggling when
writing the benchmark.
- Preparing arrays etc., should generally be put in the ``setup`` method rather
- Preparing arrays etc. should generally be put in the ``setup`` method rather
than the ``time_`` methods, to avoid counting preparation time together with
the time of the benchmarked operation.
- Use ``run_monitored`` from ``common.py`` if you need to measure memory usage.
- Benchmark versioning: by default ``asv`` invalidates old results
when there is any code change in the benchmark routine or in
setup/setup_cache.
This can be controlled manually by setting a fixed benchmark version
number, using the ``version`` attribute. See `ASV documentation`_
for details.
If set manually, the value needs to be changed manually when old
results should be invalidated. In case you want to preserve previous
benchmark results when the benchmark did not previously have a
manual ``version`` attribute, the automatically computed default
values can be found in ``results/benchmark.json``.
- Benchmark attributes such as ``params`` and ``param_names`` must be
the same regardless of whether some features are available, or
e.g. SCIPY_XSLOW=1 is set.
Instead, benchmarks that should not be run can be skipped by raising
``NotImplementedError`` in ``setup()``.

View File

@ -7,33 +7,20 @@
"project": "scipy",
// The project's homepage
"project_url": "https://scipy.org/",
"project_url": "https://scipy.org/scipylib/",
// The URL of the source code repository for the project being
// benchmarked
"repo": "..",
"dvcs": "git",
"branches": ["HEAD"],
"branches": ["master"],
// Customizable commands for building, installing, and
// uninstalling the project. See asv.conf.json documentation.
//
// "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
// "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
// "build_command": [
// "PIP_NO_BUILD_ISOLATION=false python -m pip install . --no-deps --no-index -w {build_cache_dir} {build_dir}"
// ],
"build_command": [
"python -m build --wheel -o {build_cache_dir} {build_dir}"
],
// The base URL to show a commit for the project.
"show_commit_url": "https://github.com/scipy/scipy/commit/",
// The base URL to "how a commit for the project.
"show_commit_url": "http://github.com/scipy/scipy/commit/",
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
// "pythons": ["3.6"],
"pythons": ["3.6"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
@ -41,11 +28,10 @@
// version.
"matrix": {
"numpy": [],
"Cython": [],
"Tempita": [],
"Cython": ["0.27.3"],
"pytest": [],
"pythran": [],
"pybind11": [],
"meson-python": [],
"six": [],
},
// The directory (relative to the current directory) that benchmarks are
@ -56,14 +42,8 @@
// environments in. If not provided, defaults to "env"
"env_dir": "env",
// The tool to use to create environments. May be "conda",
// "virtualenv" or other value depending on the plugins in use.
// If missing or the empty string, the tool will be automatically
// determined by looking for tools on the PATH environment
// variable.
"environment_type": "virtualenv",
// "environment_type": "mamba",
"build_cache_size": 10,
"wheel_cache_size": 10,
// The directory (relative to the current directory) that raw benchmark
// results are stored in. If not provided, defaults to "results".

View File

@ -1,29 +0,0 @@
from .common import XPBenchmark, safe_import
with safe_import():
from scipy._lib.array_api_compat import array_namespace as compat_namespace
from scipy._lib._array_api import array_namespace
class ArrayNamespace(XPBenchmark):
    """Measure the overhead of resolving an array's API namespace."""

    def setup(self, backend):
        # A do-nothing function whose only cost is the namespace lookup;
        # used to benchmark the per-call overhead in a trivial wrapper.
        def passthrough(arr):
            _ = array_namespace(arr)
            return arr

        super().setup(backend, passthrough)
        self.x = self.synchronize(self.xp.empty(0))
        # Populate @lru_cache and jax.jit. Note that this benefits all backends.
        self.func(self.x)

    def time_array_namespace(self, backend):
        """scipy wrapper around array_api_compat.array_namespace"""
        array_namespace(self.x)

    def time_compat_namespace(self, backend):
        """Bare array_api_compat.array_namespace"""
        compat_namespace(self.x)

    def time_trivial_func(self, backend):
        """Trivial function that internally calls `xp=array_namespace(*args)`"""
        self.func(self.x)

View File

@ -1,32 +0,0 @@
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg.blas as bla
class GetBlasLapackFuncs(Benchmark):
    """
    Measure how quickly the correct BLAS/LAPACK routine flavor is chosen.

    Even when handed arrays with unusual dtypes, the lookup times should
    not diverge much, so the results across cases should stay comparable.
    """
    param_names = ['dtype1', 'dtype2',
                   'dtype1_ord', 'dtype2_ord',
                   'size']
    params = [
        ['b', 'G', 'd'],
        ['d', 'F', '?'],
        ['C', 'F'],
        ['C', 'F'],
        [10, 100, 1000],
    ]

    def setup(self, dtype1, dtype2, dtype1_ord, dtype2_ord, size):
        # Build both uninitialized operand arrays from their (dtype, order)
        # specs; contents are irrelevant, only the metadata is inspected.
        specs = ((dtype1, dtype1_ord), (dtype2, dtype2_ord))
        self.arr1, self.arr2 = (
            np.empty(size, dtype=dt, order=order) for dt, order in specs
        )

    def time_find_best_blas_type(self, dtype1, dtype2, dtype1_ord, dtype2_ord, size):
        prefix, dtype, prefer_fortran = bla.find_best_blas_type(
            (self.arr1, self.arr2)
        )

View File

@ -1,71 +1,25 @@
import warnings
import numpy as np
from .common import Benchmark, XPBenchmark, is_xslow, safe_import
from .common import Benchmark
with safe_import():
from scipy.cluster.hierarchy import linkage, is_isomorphic
from scipy.cluster.vq import kmeans, kmeans2, vq, whiten
try:
from scipy.cluster.hierarchy import linkage
from scipy.cluster.vq import kmeans, kmeans2, vq
except ImportError:
pass
class Linkage(XPBenchmark):
method = ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']
param_names = (*XPBenchmark.param_names, "size", "method")
if is_xslow():
size = [100, 180, 325, 585, 1054, 1898, 3420, 6162, 11101, 20000]
else:
size = [2000]
params = (*XPBenchmark.params, size, method)
class HierarchyLinkage(Benchmark):
params = ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']
param_names = ['method']
def setup(self, backend, size, method):
super().setup(backend, linkage, static_argnames="method")
def __init__(self):
rnd = np.random.RandomState(0)
self.X = rnd.randn(2000, 2)
rng = np.random.default_rng(0)
y = self.xp.asarray(rng.standard_normal((size, 2)))
self.y = self.synchronize(y)
if self.warmup:
self.func(self.y, method=method)
def time_linkage(self, backend, size, method):
self.func(self.y, method=method)
class IsIsomorphic(XPBenchmark):
NCLUSTERS = 5
# This is very slow and memory intensive, but necessary to
# let _most_ backends approach O(n*logn) behaviour.
# Note: memory usage = 16 * nobs
if is_xslow():
nobs = [100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000]
else:
nobs = [100, 100_000]
# Skip cpu backends for nobs greater than this.
# They should all have reached O(n*logn) behaviour by then.
CPU_MAX_OBS = 1_000_000
param_names = (*XPBenchmark.param_names, "nobs")
params = (*XPBenchmark.params, nobs)
def setup(self, backend, nobs):
use_cuda = backend == "cupy" or backend.endswith(":cuda")
if not use_cuda and nobs > self.CPU_MAX_OBS:
raise NotImplementedError("Skipping huge size on CPU")
super().setup(backend, is_isomorphic)
rng = np.random.default_rng(0)
a = self.xp.asarray(rng.integers(0, self.NCLUSTERS, size=nobs))
p = self.xp.asarray(rng.permutation(self.NCLUSTERS))
b = self.xp.take(p, a)
self.a, self.b = self.synchronize(a, b)
if self.warmup:
self.func(self.a, self.b)
def time_is_isomorphic(self, backend, nobs):
self.func(self.a, self.b)
def time_linkage(self, method):
linkage(self.X, method=method)
class KMeans(Benchmark):
@ -79,27 +33,12 @@ class KMeans(Benchmark):
def time_kmeans(self, k):
kmeans(self.obs, k, iter=10)
class KMeans2(Benchmark):
params = [[2, 10, 50], ['random', 'points', '++']]
param_names = ['k', 'init']
def __init__(self):
rnd = np.random.RandomState(0)
self.obs = rnd.rand(1000, 5)
def time_kmeans2(self, k, init):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
("One of the clusters is empty. Re-run kmeans with a "
"different initialization"),
UserWarning)
kmeans2(self.obs, k, minit=init, iter=10)
def time_kmeans2(self, k):
kmeans2(self.obs, k, iter=10)
class VQ(Benchmark):
params = [[2, 10, 50], ['float32', 'float64']]
params = [[2, 10, 50], ['float32', 'float64', 'float128']]
param_names = ['k', 'dtype']
def __init__(self):
@ -113,27 +52,3 @@ class VQ(Benchmark):
def time_vq(self, k, dtype):
vq(self.obs, self.cbook)
class Whiten(XPBenchmark):
if is_xslow():
shape = [(10, 10), (32, 32), (100, 100), (320, 320),
(1000, 1000), (3200, 3200), (10_000, 10_000)]
else:
shape = [(10, 10), (100, 100)]
param_names = (*XPBenchmark.param_names, "shape")
params = (*XPBenchmark.params, shape)
def setup(self, backend, shape):
super().setup(backend, whiten, static_argnames="check_finite")
rng = np.random.default_rng(0)
obs = self.xp.asarray(rng.uniform(0, 100.0, size=shape))
self.obs = self.synchronize(obs)
if self.warmup:
self.func(self.obs, check_finite=False)
def time_whiten(self, backend, shape):
self.func(self.obs, check_finite=False)

View File

@ -1,59 +0,0 @@
import numpy as np
try:
from scipy.cluster.hierarchy import DisjointSet
except ImportError:
pass
from .common import Benchmark
class Bench(Benchmark):
    """Benchmarks for the DisjointSet (union-find) data structure."""

    params = [[100, 1000, 10000]]
    param_names = ['n']

    def setup(self, n):
        # Deterministic random edge list over ~10*n candidate node labels.
        rng = np.random.RandomState(seed=0)
        self.edges = rng.randint(0, 10 * n, (n, 2))
        self.nodes = np.unique(self.edges)

        # Fresh structure; time_merge performs all the merges itself.
        self.disjoint_set = DisjointSet(self.nodes)

        # Structure with every edge already merged.
        self.pre_merged = DisjointSet(self.nodes)
        for u, v in self.edges:
            self.pre_merged.merge(u, v)

        # As above, but with every root already looked up once, so that
        # path compression has been applied before timing starts.
        self.pre_merged_found = DisjointSet(self.nodes)
        for u, v in self.edges:
            self.pre_merged_found.merge(u, v)
        for node in self.nodes:
            self.pre_merged_found[node]

    def time_merge(self, n):
        dis = self.disjoint_set
        for u, v in self.edges:
            dis.merge(u, v)

    def time_merge_already_merged(self, n):
        dis = self.pre_merged
        for u, v in self.edges:
            dis.merge(u, v)

    def time_find(self, n):
        dis = self.pre_merged
        return [dis[node] for node in self.nodes]

    def time_find_already_found(self, n):
        dis = self.pre_merged_found
        return [dis[node] for node in self.nodes]

    def time_contains(self, n):
        # Membership tests for elements known to be present.
        for idx in (0, n // 2, -1):
            assert self.nodes[idx] in self.pre_merged

    def time_absence(self, n):
        # Membership tests for values that were never added.
        for missing in (None, "dummy", (1, 2, 3)):
            assert missing not in self.pre_merged

View File

@ -1,352 +1,23 @@
"""
Airspeed Velocity benchmark utilities
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import time
import textwrap
import subprocess
import itertools
import random
from asv_runner.benchmarks.mark import SkipNotImplemented
class Benchmark:
class Benchmark(object):
"""
Base class with sensible options
"""
goal_time = 0.25
class XPBenchmark(Benchmark):
"""
Base class for benchmarks that are run on multiple Array API backends
and devices. Supports multiple devices, jax.jit, and lazy/asynchronous
evaluation.
Basic usage
-----------
::
def myfunc(x):
return x + 1
class MyFunc(XPBenchmark):
def setup(self, backend):
super().setup(backend, myfunc)
x = self.xp.arange(5)
self.x = self.synchronize(x)
if self.warmup:
self.func(self.x)
def time_myfunc(self, backend):
self.func(self.x)
Adding parameters
-----------------
In the below example:
- We add a `size` asv parameter
- We add a `plus` function parameter which can't be traced by jax.jit
::
def myfunc(x, plus=True):
return x + 1 if plus else x - 1
class MyFunc(XPBenchmark):
param_names = (*XPBenchmark.param_names, "size")
params = (*XPBenchmark.params, [5, 10])
def setup(self, backend, size):
super().setup(backend, myfunc, static_argnames=("plus",))
x = self.xp.arange(size)
self.x = self.synchronize(x)
if self.warmup:
self.func(self.x, plus=True)
self.func(self.x, plus=False)
def time_myfunc_plus(self, backend, size):
self.func(self.x, plus=True)
def time_myfunc_minus(self, backend, size):
self.func(self.x, plus=False)
"""
backends = ["numpy", "array_api_strict", "cupy", "torch:cpu", "torch:cuda",
"dask.array", "jax.numpy:cpu", "jax.numpy:cuda"]
# subclasses can override these
param_names = ("backend",)
params = (backends, )
def setup(self, backend, func, *, static_argnums=None, static_argnames=None):
"""Skip benchmark if backend/device combination is not available.
Configure namespace.
Potentially wrap func with jax.jit and ensure timings are correct
for lazy backends.
Parameters
----------
backend : str
backend name from asv parameterization
func : callable
function to benchmark
static_argnums : Sequence[int], optional
Parameter for jax.jit. Note that, unlike in the unit tests,
we can't use the automatic parameter and return value wrap/unwrap
from `array_api_extra.testing.lazy_xp_function`, as it comes with a
substantial performance overhead.
static_argnames : Sequence[str], optional
Parameter for jax.jit
Sets attributes
---------------
backend : str
As the parameter (for convenience of helper functions)
xp : namespace
array namespace, potentially wrapped by array_api_compat
func : callable
function to benchmark, potentially wrapped
warmup : bool
Whether setup() should run a warmup iteration
"""
self.backend = backend
if ":" in backend:
backend, device = backend.split(":")
else:
device = "cuda" if backend == "cupy" else "cpu"
with safe_import() as array_api_imports:
# Requires scipy >=1.16
from scipy._lib._array_api import array_namespace, xp_capabilities_table
from scipy.conftest import xp_available_backends, xp_known_backends
if isinstance(xp_available_backends, dict): # scipy == 1.16
backends = xp_available_backends
else: # scipy >= 1.17
backends = {p.id: p.values[0] for p in xp_available_backends}
if array_api_imports.error:
# On older scipy versions, disregard SCIPY_ARRAY_API
import numpy as np
def array_namespace(*args, **kwargs):
return np
xp_capabilities_table = {}
backends = {"numpy": np}
xp_known_backends = {"numpy"}
# If new backends are added to conftest.py, you need to add them here too
assert not xp_known_backends - set(n.split(":")[0] for n in self.backends)
try:
xp = backends[backend]
except KeyError:
raise SkipNotImplemented(
f"{backend} not available or skipped by SCIPY_ARRAY_API")
if func and func in xp_capabilities_table:
capabilities = xp_capabilities_table[func]
skips = {n for n, _ in capabilities["skip_backends"]}
skips |= {n for n, _ in capabilities["xfail_backends"]}
if (((capabilities["cpu_only"] and device != "cpu")
or (capabilities["np_only"] and backend != "numpy"))
and backend not in capabilities["exceptions"]):
skips.add(backend)
if backend in skips:
raise SkipNotImplemented(f"{backend} skipped by @xp_capabilities")
else:
capabilities = {"jax_jit": False}
# Potentially wrap namespace with array_api_compat
xp = array_namespace(xp.empty(0))
self.xp = xp
self.func = func
self.warmup = False
if backend == "torch":
import torch
torch.set_default_dtype(torch.float64)
try:
torch.empty(0, device=device)
except (RuntimeError, AssertionError):
raise SkipNotImplemented(f"{device=} not available")
torch.set_default_device(device)
if device == "cuda":
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
torch.cuda.synchronize()
return res
self.func = wrapper
elif backend == "jax.numpy":
import jax
jax.config.update("jax_enable_x64", True)
try:
jax_device = jax.devices(device)[0]
except RuntimeError:
raise SkipNotImplemented(f"{device=} not available")
jax.config.update("jax_default_device", jax_device)
if capabilities["jax_jit"]:
func = jax.jit(func, static_argnames=static_argnames,
static_argnums=static_argnums)
self.warmup = True
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
jax.block_until_ready(res)
return res
self.func = wrapper
elif backend == "dask.array":
import dask
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
return dask.compute(res)[0]
self.func = wrapper
elif backend == "cupy":
import cupy
# The default stream is non-blocking.
# As of CuPy 13.4.1, explicit non-blocking streams
# are substantially slower.
# cupy.cuda.Stream(non_blocking=True).use()
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
cupy.cuda.get_current_stream().synchronize()
return res
self.func = wrapper
else:
assert backend in ("numpy", "array_api_strict")
def synchronize(self, *arrays):
"""Wait until the given arrays have finished generating and return a
synchronized instance of them.
You need to call this on all arrays that your setup() function creates.
"""
if self.backend == "dask.array":
import dask
arrays = dask.persist(*arrays)
elif self.backend in ("jax.numpy:cpu", "jax.numpy:cuda"):
import jax
jax.block_until_ready(arrays)
elif self.backend == "torch:cuda":
import torch
torch.cuda.synchronize()
elif self.backend == "cupy":
import cupy
cupy.cuda.get_current_stream().synchronize()
else:
assert self.backend in ("numpy", "array_api_strict", "torch:cpu")
return arrays[0] if len(arrays) == 1 else arrays
def is_xslow():
try:
return int(os.environ.get('SCIPY_XSLOW', '0'))
except ValueError:
return False
class LimitedParamBenchmark(Benchmark):
"""
Limits parameter combinations to `max_number` choices, chosen
pseudo-randomly with fixed seed.
Raises NotImplementedError (skip) if not in active set.
"""
num_param_combinations = 0
def setup(self, *args, **kwargs):
slow = is_xslow()
if slow:
# no need to skip
return
param_seed = kwargs.pop('param_seed', None)
if param_seed is None:
param_seed = 1
params = kwargs.pop('params', None)
if params is None:
params = self.params
num_param_combinations = kwargs.pop('num_param_combinations', None)
if num_param_combinations is None:
num_param_combinations = self.num_param_combinations
all_choices = list(itertools.product(*params))
rng = random.Random(param_seed)
rng.shuffle(all_choices)
active_choices = all_choices[:num_param_combinations]
if args not in active_choices:
raise NotImplementedError("skipped")
def get_max_rss_bytes(rusage):
"""
Extract the max RSS value in bytes.
"""
if not rusage:
return None
if sys.platform.startswith('linux'):
# On Linux getrusage() returns ru_maxrss in kilobytes
# https://man7.org/linux/man-pages/man2/getrusage.2.html
return rusage.ru_maxrss * 1024
elif sys.platform == "darwin":
# on macOS ru_maxrss is in bytes
return rusage.ru_maxrss
else:
# Unknown, just return whatever is here.
return rusage.ru_maxrss
def run_monitored_wait4(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : int
Peak memory usage in bytes of the child Python process
Notes
-----
Works on Unix platforms (Linux, macOS) that have `os.wait4()`.
"""
code = textwrap.dedent(code)
start = time.time()
process = subprocess.Popen([sys.executable, '-c', code])
pid, returncode, rusage = os.wait4(process.pid, 0)
duration = time.time() - start
max_rss_bytes = get_max_rss_bytes(rusage)
if returncode != 0:
raise AssertionError(f"Running failed:\n{code}")
return duration, max_rss_bytes
def run_monitored_proc(code):
def run_monitored(code):
"""
Run code in a new Python process, and monitor peak memory usage.
@ -372,10 +43,10 @@ def run_monitored_proc(code):
if ret is not None:
break
with open(f'/proc/{process.pid}/status') as f:
with open('/proc/%d/status' % process.pid, 'r') as f:
procdata = f.read()
m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
m = re.search('VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
if m is not None:
memusage = float(m.group(1)) * 1e3
peak_memusage = max(memusage, peak_memusage)
@ -387,38 +58,22 @@ def run_monitored_proc(code):
duration = time.time() - start
if process.returncode != 0:
raise AssertionError(f"Running failed:\n{code}")
raise AssertionError("Running failed:\n%s" % code)
return duration, peak_memusage
def run_monitored(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : float or int
Peak memory usage (rough estimate only) in bytes
"""
if hasattr(os, 'wait4'):
return run_monitored_wait4(code)
else:
return run_monitored_proc(code)
def get_mem_info():
"""Get information about available memory"""
import psutil
vm = psutil.virtual_memory()
return {
"memtotal": vm.total,
"memavailable": vm.available,
}
if not sys.platform.startswith('linux'):
raise RuntimeError("Memory information implemented only for Linux")
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
return info
def set_mem_rlimit(max_mem=None):
@ -433,11 +88,7 @@ def set_mem_rlimit(max_mem=None):
if cur_limit[0] > 0:
max_mem = min(max_mem, cur_limit[0])
try:
resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
except ValueError:
# on macOS may raise: current limit exceeds maximum limit
pass
resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
def with_attributes(**attrs):
@ -446,18 +97,3 @@ def with_attributes(**attrs):
setattr(func, key, value)
return func
return decorator
class safe_import:
def __enter__(self):
self.error = False
return self
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.error = True
suppress = not (
os.getenv('SCIPY_ALLOW_BENCH_IMPORT_ERRORS', '1').lower() in
('0', 'false') or not issubclass(type_, ImportError))
return suppress

View File

@ -1,59 +0,0 @@
# This is a python implementation of calfun.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
from .dfovec import dfovec
def norm(x, type=2):
if type == 1:
return np.sum(np.abs(x))
elif type == 2:
return np.sqrt(x ** 2)
else: # type==np.inf:
return max(np.abs(x))
def calfun(x, m, nprob, probtype="smooth", noise_level=1e-3):
    """Evaluate a BenDFO benchmark objective at the point `x`.

    Parameters
    ----------
    x : ndarray
        Point at which to evaluate, of length ``n``.
    m : int
        Number of components in the residual vector.
    nprob : int
        Problem number, forwarded to `dfovec`.
    probtype : {"smooth", "nondiff", "noisy3", "wild3"}, optional
        Which variant of the objective to compute.
    noise_level : float, optional
        Noise magnitude used by the "noisy3" and "wild3" variants.

    Returns
    -------
    float or None
        Objective value (``np.inf`` instead of NaN), or None for an
        unrecognized `probtype`.
    """
    n = len(x)

    # Restrict domain for some nondiff problems.
    xc = x
    if probtype == "nondiff" and nprob in (8, 9, 13, 16, 17, 18):
        # BUG FIX: the builtin max(x, 0) raises ValueError for numpy
        # arrays with more than one element; element-wise clipping at
        # zero is what is intended here.
        xc = np.maximum(x, 0)

    # Generate the residual vector.
    fvec = dfovec(m, n, xc, nprob)

    # Calculate the function value.
    if probtype == "noisy3":
        # Multiplicative uniform noise in [-sigma, sigma] on each residual.
        sigma = noise_level
        u = sigma * (-np.ones(m) + 2 * np.random.rand(m))
        fvec = fvec * (1 + u)
        y = np.sum(fvec ** 2)
    elif probtype == "wild3":
        # Deterministic "noise" built from trigonometric functions of x.
        sigma = noise_level
        phi = 0.9 * np.sin(100 * norm(x, 1)) * np.cos(
            100 * norm(x, np.inf)
        ) + 0.1 * np.cos(norm(x, 2))
        phi = phi * (4 * phi ** 2 - 3)
        y = (1 + sigma * phi) * sum(fvec ** 2)
    elif probtype == "smooth":
        y = np.sum(fvec ** 2)
    elif probtype == "nondiff":
        y = np.sum(np.abs(fvec))
    else:
        print(f"invalid probtype {probtype}")
        return None

    # Never return nan. Return inf instead so that
    # optimization algorithms treat it as out of bounds.
    if np.isnan(y):
        return np.inf
    return y

View File

@ -1,53 +0,0 @@
1 9 45 0
1 9 45 1
2 7 35 0
2 7 35 1
3 7 35 0
3 7 35 1
4 2 2 0
4 2 2 1
5 3 3 0
5 3 3 1
6 4 4 0
6 4 4 1
7 2 2 0
7 2 2 1
8 3 15 0
8 3 15 1
9 4 11 0
10 3 16 0
11 6 31 0
11 6 31 1
11 9 31 0
11 9 31 1
11 12 31 0
11 12 31 1
12 3 10 0
13 2 10 0
14 4 20 0
14 4 20 1
15 6 6 0
15 7 7 0
15 8 8 0
15 9 9 0
15 10 10 0
15 11 11 0
16 10 10 0
17 5 33 0
18 11 65 0
18 11 65 1
19 8 8 0
19 10 12 0
19 11 14 0
19 12 16 0
20 5 5 0
20 6 6 0
20 8 8 0
21 5 5 0
21 5 5 1
21 8 8 0
21 10 10 0
21 12 12 0
21 12 12 1
22 8 8 0
22 8 8 1

View File

@ -1,377 +0,0 @@
# This is a python implementation of dfovec.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
def dfovec(m, n, x, nprob):
    """Evaluate the residual vector of benchmark problem `nprob`.

    Python port of dfovec.m provided at https://github.com/POptUS/BenDFO
    (the More-Garbow-Hillstrom test set plus extensions).

    Parameters
    ----------
    m : int
        Number of residual components.
    n : int
        Number of variables, ``len(x)``.
    x : ndarray
        Point at which to evaluate the residuals.
    nprob : int
        Problem number in 1..22.

    Returns
    -------
    ndarray of shape (m,), or None for an unrecognized `nprob`.
    """
    # Set lots of constants:
    c13 = 1.3e1
    c14 = 1.4e1
    c29 = 2.9e1
    c45 = 4.5e1
    # Data vectors for the data-fitting problems (Bard, Kowalik-Osborne,
    # Meyer, Osborne 1 and 2).
    v = [
        4.0e0, 2.0e0, 1.0e0, 5.0e-1, 2.5e-1, 1.67e-1,
        1.25e-1, 1.0e-1, 8.33e-2, 7.14e-2, 6.25e-2,
    ]
    y1 = [
        1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1,
        3.2e-1, 3.5e-1, 3.9e-1, 3.7e-1, 5.8e-1,
        7.3e-1, 9.6e-1, 1.34e0, 2.1e0, 4.39e0,
    ]
    y2 = [
        1.957e-1, 1.947e-1, 1.735e-1, 1.6e-1, 8.44e-2, 6.27e-2,
        4.56e-2, 3.42e-2, 3.23e-2, 2.35e-2, 2.46e-2,
    ]
    y3 = [
        3.478e4, 2.861e4, 2.365e4, 1.963e4, 1.637e4, 1.372e4,
        1.154e4, 9.744e3, 8.261e3, 7.03e3, 6.005e3, 5.147e3,
        4.427e3, 3.82e3, 3.307e3, 2.872e3,
    ]
    y4 = [
        8.44e-1, 9.08e-1, 9.32e-1, 9.36e-1, 9.25e-1, 9.08e-1,
        8.81e-1, 8.5e-1, 8.18e-1, 7.84e-1, 7.51e-1, 7.18e-1,
        6.85e-1, 6.58e-1, 6.28e-1, 6.03e-1, 5.8e-1, 5.58e-1,
        5.38e-1, 5.22e-1, 5.06e-1, 4.9e-1, 4.78e-1, 4.67e-1,
        4.57e-1, 4.48e-1, 4.38e-1, 4.31e-1, 4.24e-1, 4.2e-1,
        4.14e-1, 4.11e-1, 4.06e-1,
    ]
    y5 = [
        1.366e0, 1.191e0, 1.112e0, 1.013e0, 9.91e-1, 8.85e-1,
        8.31e-1, 8.47e-1, 7.86e-1, 7.25e-1, 7.46e-1, 6.79e-1,
        6.08e-1, 6.55e-1, 6.16e-1, 6.06e-1, 6.02e-1, 6.26e-1,
        6.51e-1, 7.24e-1, 6.49e-1, 6.49e-1, 6.94e-1, 6.44e-1,
        6.24e-1, 6.61e-1, 6.12e-1, 5.58e-1, 5.33e-1, 4.95e-1,
        5.0e-1, 4.23e-1, 3.95e-1, 3.75e-1, 3.72e-1, 3.91e-1,
        3.96e-1, 4.05e-1, 4.28e-1, 4.29e-1, 5.23e-1, 5.62e-1,
        6.07e-1, 6.53e-1, 6.72e-1, 7.08e-1, 6.33e-1, 6.68e-1,
        6.45e-1, 6.32e-1, 5.91e-1, 5.59e-1, 5.97e-1, 6.25e-1,
        7.39e-1, 7.1e-1, 7.29e-1, 7.2e-1, 6.36e-1, 5.81e-1,
        4.28e-1, 2.92e-1, 1.62e-1, 9.8e-2, 5.4e-2,
    ]
    # Initialize things
    fvec = np.zeros(m)
    total = 0
    if nprob == 1:  # Linear function - full rank.
        for j in range(n):
            total = total + x[j]
        temp = 2 * total / m + 1
        for i in range(m):
            fvec[i] = -temp
            if i < n:
                fvec[i] = fvec[i] + x[i]
    elif nprob == 2:  # Linear function - rank 1.
        for j in range(n):
            total = total + (j + 1) * x[j]
        for i in range(m):
            fvec[i] = (i + 1) * total - 1
    elif nprob == 3:  # Linear function - rank 1 with zero columns and rows.
        for j in range(1, n - 1):
            total = total + (j + 1) * x[j]
        for i in range(m - 1):
            fvec[i] = i * total - 1
        fvec[m - 1] = -1
    elif nprob == 4:  # Rosenbrock function.
        fvec[0] = 10 * (x[1] - x[0] * x[0])
        fvec[1] = 1 - x[0]
    elif nprob == 5:  # Helical valley function.
        # th is the (scaled) polar angle of (x[0], x[1]).
        if x[0] > 0:
            th = np.arctan(x[1] / x[0]) / (2 * np.pi)
        elif x[0] < 0:
            th = np.arctan(x[1] / x[0]) / (2 * np.pi) + 0.5
        elif x[0] == x[1] and x[1] == 0:
            th = 0.0
        else:
            th = 0.25
        r = np.sqrt(x[0] * x[0] + x[1] * x[1])
        fvec[0] = 10 * (x[2] - 10 * th)
        fvec[1] = 10 * (r - 1)
        fvec[2] = x[2]
    elif nprob == 6:  # Powell singular function.
        fvec[0] = x[0] + 10 * x[1]
        fvec[1] = np.sqrt(5) * (x[2] - x[3])
        fvec[2] = (x[1] - 2 * x[2]) ** 2
        fvec[3] = np.sqrt(10) * (x[0] - x[3]) ** 2
    elif nprob == 7:  # Freudenstein and Roth function.
        fvec[0] = -c13 + x[0] + ((5 - x[1]) * x[1] - 2) * x[1]
        fvec[1] = -c29 + x[0] + ((1 + x[1]) * x[1] - c14) * x[1]
    elif nprob == 8:  # Bard function.
        for i in range(15):
            tmp1 = i + 1
            tmp2 = 15 - i
            tmp3 = tmp1
            if i > 7:
                tmp3 = tmp2
            fvec[i] = y1[i] - (x[0] + tmp1 / (x[1] * tmp2 + x[2] * tmp3))
    elif nprob == 9:  # Kowalik and Osborne function.
        for i in range(11):
            tmp1 = v[i] * (v[i] + x[1])
            tmp2 = v[i] * (v[i] + x[2]) + x[3]
            fvec[i] = y2[i] - x[0] * tmp1 / tmp2
    elif nprob == 10:  # Meyer function.
        for i in range(16):
            temp = 5 * (i + 1) + c45 + x[2]
            tmp1 = x[1] / temp
            tmp2 = np.exp(tmp1)
            fvec[i] = x[0] * tmp2 - y3[i]
    elif nprob == 11:  # Watson function.
        # Assumes m == 31: residuals 0..28 from the ODE collocation,
        # plus the two boundary terms below.
        for i in range(29):
            div = (i + 1) / c29
            s1 = 0
            dx = 1
            for j in range(1, n):
                s1 = s1 + j * dx * x[j]
                dx = div * dx
            s2 = 0
            dx = 1
            for j in range(n):
                s2 = s2 + dx * x[j]
                dx = div * dx
            fvec[i] = s1 - s2 * s2 - 1
        fvec[29] = x[0]
        fvec[30] = x[1] - x[0] * x[0] - 1
    elif nprob == 12:  # Box 3-dimensional function.
        for i in range(m):
            temp = i + 1
            tmp1 = temp / 10
            fvec[i] = (
                np.exp(-tmp1 * x[0])
                - np.exp(-tmp1 * x[1])
                + (np.exp(-temp) - np.exp(-tmp1)) * x[2]
            )
    elif nprob == 13:  # Jennrich and Sampson function.
        for i in range(m):
            temp = i + 1
            fvec[i] = 2 + 2 * temp - np.exp(temp * x[0]) - np.exp(temp * x[1])
    elif nprob == 14:  # Brown and Dennis function.
        for i in range(m):
            temp = (i + 1) / 5
            tmp1 = x[0] + temp * x[1] - np.exp(temp)
            tmp2 = x[2] + np.sin(temp) * x[3] - np.cos(temp)
            fvec[i] = tmp1 * tmp1 + tmp2 * tmp2
    elif nprob == 15:  # Chebyquad function.
        # Accumulate shifted Chebyshev polynomials via the three-term
        # recurrence t2 -> t*t2 - t1.
        for j in range(n):
            t1 = 1
            t2 = 2 * x[j] - 1
            t = 2 * t2
            for i in range(m):
                fvec[i] = fvec[i] + t2
                th = t * t2 - t1
                t1 = t2
                t2 = th
        iev = -1
        for i in range(m):
            fvec[i] = fvec[i] / n
            if iev > 0:
                # Exact integral of even-degree Chebyshev polynomials.
                fvec[i] = fvec[i] + 1 / ((i + 1) ** 2 - 1)
            iev = -iev
    elif nprob == 16:  # Brown almost-linear function.
        total1 = -(n + 1)
        prod1 = 1
        for j in range(n):
            total1 = total1 + x[j]
            prod1 = x[j] * prod1
        for i in range(n - 1):
            fvec[i] = x[i] + total1
        fvec[n - 1] = prod1 - 1
    elif nprob == 17:  # Osborne 1 function.
        for i in range(33):
            temp = 10 * i
            tmp1 = np.exp(-x[3] * temp)
            tmp2 = np.exp(-x[4] * temp)
            fvec[i] = y4[i] - (x[0] + x[1] * tmp1 + x[2] * tmp2)
    elif nprob == 18:  # Osborne 2 function.
        for i in range(65):
            temp = i / 10
            tmp1 = np.exp(-x[4] * temp)
            tmp2 = np.exp(-x[5] * (temp - x[8]) ** 2)
            tmp3 = np.exp(-x[6] * (temp - x[9]) ** 2)
            tmp4 = np.exp(-x[7] * (temp - x[10]) ** 2)
            fvec[i] = y5[i] - (x[0] * tmp1 + x[1] * tmp2 + x[2] * tmp3 + x[3] * tmp4)
    elif nprob == 19:  # Bdqrtic
        # n >= 5, m = (n-4)*2
        for i in range(n - 4):
            fvec[i] = -4 * x[i] + 3.0
            fvec[n - 4 + i] = (
                x[i] ** 2
                + 2 * x[i + 1] ** 2
                + 3 * x[i + 2] ** 2
                + 4 * x[i + 3] ** 2
                + 5 * x[n - 1] ** 2
            )
    elif nprob == 20:  # Cube
        # m = n
        fvec[0] = x[0] - 1.0
        for i in range(1, n):
            fvec[i] = 10 * (x[i] - x[i - 1] ** 3)
    elif nprob == 21:  # Mancino
        # m = n
        for i in range(n):
            ss = 0
            for j in range(n):
                v2 = np.sqrt(x[i] ** 2 + (i + 1) / (j + 1))
                ss = ss + v2 * ((np.sin(np.log(v2))) ** 5 + (np.cos(np.log(v2))) ** 5)
            fvec[i] = 1400 * x[i] + (i - 49) ** 3 + ss
    elif nprob == 22:  # Heart8ls
        # m = n = 8
        fvec[0] = x[0] + x[1] + 0.69
        fvec[1] = x[2] + x[3] + 0.044
        fvec[2] = x[4] * x[0] + x[5] * x[1] - x[6] * x[2] - x[7] * x[3] + 1.57
        fvec[3] = x[6] * x[0] + x[7] * x[1] + x[4] * x[2] + x[5] * x[3] + 1.31
        fvec[4] = (
            x[0] * (x[4] ** 2 - x[6] ** 2)
            - 2.0 * x[2] * x[4] * x[6]
            + x[1] * (x[5] ** 2 - x[7] ** 2)
            - 2.0 * x[3] * x[5] * x[7]
            + 2.65
        )
        fvec[5] = (
            x[2] * (x[4] ** 2 - x[6] ** 2)
            + 2.0 * x[0] * x[4] * x[6]
            + x[3] * (x[5] ** 2 - x[7] ** 2)
            + 2.0 * x[1] * x[5] * x[7]
            - 2.0
        )
        fvec[6] = (
            x[0] * x[4] * (x[4] ** 2 - 3.0 * x[6] ** 2)
            + x[2] * x[6] * (x[6] ** 2 - 3.0 * x[4] ** 2)
            + x[1] * x[5] * (x[5] ** 2 - 3.0 * x[7] ** 2)
            + x[3] * x[7] * (x[7] ** 2 - 3.0 * x[5] ** 2)
            + 12.6
        )
        fvec[7] = (
            x[2] * x[4] * (x[4] ** 2 - 3.0 * x[6] ** 2)
            - x[0] * x[6] * (x[6] ** 2 - 3.0 * x[4] ** 2)
            + x[3] * x[5] * (x[5] ** 2 - 3.0 * x[7] ** 2)
            # BUG FIX: last factor was 3.0 * x[6] ** 2, breaking the
            # (x[5], x[7]) pairing of the heart-dipole third-harmonic
            # residuals (cf. dfovec.m / MINPACK-2 heart problem).
            - x[1] * x[7] * (x[7] ** 2 - 3.0 * x[5] ** 2)
            - 9.48
        )
    else:
        print(f"unrecognized function number {nprob}")
        return None
    return fvec

View File

@ -1,94 +0,0 @@
# This is a python implementation of dfoxs.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
def dfoxs(n, nprob, factor):
    """Return the standard starting point for problem `nprob`, scaled.

    Python port of dfoxs.m provided at https://github.com/POptUS/BenDFO.

    Parameters
    ----------
    n : int
        Number of variables.
    nprob : int
        Problem number in 1..22.
    factor : float
        Multiplier applied to the standard starting point.

    Returns
    -------
    ndarray of shape (n,), or None for an unrecognized `nprob`.
    """
    x = np.zeros(n)
    if nprob in (1, 2, 3):  # Linear functions.
        x = np.ones(n)
    elif nprob == 4:  # Rosenbrock function.
        x[:2] = [-1.2, 1]
    elif nprob == 5:  # Helical valley function.
        x[0] = -1
    elif nprob == 6:  # Powell singular function.
        x[:4] = [3, -1, 0, 1]
    elif nprob == 7:  # Freudenstein and Roth function.
        x[:2] = [0.5, -2]
    elif nprob == 8:  # Bard function.
        x[:3] = [1, 1, 1]
    elif nprob == 9:  # Kowalik and Osborne function.
        x[:4] = [0.25, 0.39, 0.415, 0.39]
    elif nprob == 10:  # Meyer function.
        x[:3] = [0.02, 4000, 250]
    elif nprob == 11:  # Watson function.
        x = 0.5 * np.ones(n)
    elif nprob == 12:  # Box 3-dimensional function.
        x[:3] = [0, 10, 20]
    elif nprob == 13:  # Jennrich and Sampson function.
        x[:2] = [0.3, 0.4]
    elif nprob == 14:  # Brown and Dennis function.
        x[:4] = [25, 5, -5, -1]
    elif nprob == 15:  # Chebyquad function.
        x = np.arange(1, n + 1) / (n + 1)
    elif nprob == 16:  # Brown almost-linear function.
        x = 0.5 * np.ones(n)
    elif nprob == 17:  # Osborne 1 function.
        x[:5] = [0.5, 1.5, 1, 0.01, 0.02]
    elif nprob == 18:  # Osborne 2 function.
        x[:11] = [1.3, 0.65, 0.65, 0.7, 0.6, 3, 5, 7, 2, 4.5, 5.5]
    elif nprob == 19:  # Bdqrtic.
        x = np.ones(n)
    elif nprob == 20:  # Cube.
        x = 0.5 * np.ones(n)
    elif nprob == 21:  # Mancino.
        for i in range(n):
            acc = 0
            for j in range(n):
                root = np.sqrt((i + 1) / (j + 1))
                acc = acc + root * (
                    np.sin(np.log(root)) ** 5 + np.cos(np.log(root)) ** 5
                )
            x[i] = -8.710996e-4 * ((i - 49) ** 3 + acc)
    elif nprob == 22:  # Heart8ls.
        x = np.asarray([-0.3, -0.39, 0.3, -0.344, -1.2, 2.69, 1.59, -1.5])
    else:
        print(f"unrecognized function number {nprob}")
        return None
    return factor * x

View File

@ -1,11 +1,16 @@
from __future__ import division, absolute_import, print_function
import re
import six
import numpy as np
from scipy import special
from .common import with_attributes, safe_import
with safe_import():
try:
from scipy.special import cython_special
except ImportError:
pass
from .common import Benchmark, with_attributes
FUNC_ARGS = {
@ -57,10 +62,10 @@ class _CythonSpecialMeta(type):
return type.__new__(cls, cls_name, bases, dct)
class CythonSpecial(metaclass=_CythonSpecialMeta):
class CythonSpecial(six.with_metaclass(_CythonSpecialMeta)):
def setup(self, name, args, N, api):
self.py_func = getattr(cython_special, f'_bench_{name}_py')
self.cy_func = getattr(cython_special, f'_bench_{name}_cy')
self.py_func = getattr(cython_special, '_bench_{}_py'.format(name))
self.cy_func = getattr(cython_special, '_bench_{}_cy'.format(name))
m = re.match('^(.*)_[dDl]+$', name)
self.np_func = getattr(special, m.group(1))

View File

@ -1,354 +0,0 @@
""" Test functions for fftpack.basic module
"""
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
from numpy.random import rand
import numpy as np
from concurrent import futures
import os
import scipy.fftpack
import numpy.fft
from .common import Benchmark, safe_import
with safe_import() as exc:
import scipy.fft as scipy_fft
has_scipy_fft = True
if exc.error:
has_scipy_fft = False
with safe_import() as exc:
import pyfftw.interfaces.numpy_fft as pyfftw_fft
import pyfftw
pyfftw.interfaces.cache.enable()
has_pyfftw = True
if exc.error:
pyfftw_fft = {} # noqa: F811
has_pyfftw = False
class PyfftwBackend:
    """Backend for pyfftw"""

    # uarray domain: scipy.fft dispatches calls in this domain to us.
    __ua_domain__ = 'numpy.scipy.fft'

    @staticmethod
    def __ua_function__(method, args, kwargs):
        """Route a scipy.fft call to the same-named pyfftw function.

        Returns NotImplemented when pyfftw has no matching function, so
        uarray can fall back to another backend.
        """
        # pyfftw's numpy_fft interface does not accept overwrite_x.
        kwargs.pop('overwrite_x', None)
        fn = getattr(pyfftw_fft, method.__name__, None)
        return (NotImplemented if fn is None
                else fn(*args, **kwargs))
def random(size):
    """Return a uniform random array whose shape is the sequence *size*."""
    shape = tuple(size)
    return rand(*shape)
def direct_dft(x):
    """Naive O(n^2) reference DFT (same convention as numpy.fft.fft)."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    # One column of the DFT matrix exponent per output bin.
    kern = -arange(n) * (2j * pi / n)
    for k in range(n):
        out[k] = dot(exp(k * kern), x)
    return out
def direct_idft(x):
    """Naive O(n^2) inverse DFT (same convention as numpy.fft.ifft)."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    kern = arange(n) * (2j * pi / n)
    # Forward kernel sign flipped, plus the 1/n normalization.
    for k in range(n):
        out[k] = dot(exp(k * kern), x) / n
    return out
def get_module(mod_name):
    """Map a provider name string to the imported FFT module object.

    Raises NotImplementedError when 'scipy.fft' is requested but could
    not be imported (asv then reports the benchmark as skipped); raises
    KeyError for unknown names.
    """
    # BUG FIX: check availability before referencing scipy_fft.  When the
    # guarded import failed, the name scipy_fft is unbound and building a
    # dict containing it raised NameError for *every* module request.
    if not has_scipy_fft and mod_name == 'scipy.fft':
        raise NotImplementedError
    module_map = {
        'scipy.fftpack': scipy.fftpack,
        'numpy.fft': numpy.fft,
    }
    if has_scipy_fft:
        module_map['scipy.fft'] = scipy_fft
    return module_map[mod_name]
class Fft(Benchmark):
    """Time 1-D fft/ifft across sizes, dtypes and provider modules."""
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # Build a real or complex input of the requested length, then
        # resolve the provider's fft/ifft callables once, outside timing.
        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)
        module = get_module(module)
        self.fft = getattr(module, 'fft')
        self.ifft = getattr(module, 'ifft')

    def time_fft(self, size, cmplx, module):
        self.fft(self.x)

    def time_ifft(self, size, cmplx, module):
        self.ifft(self.x)
class NextFastLen(Benchmark):
    """Time scipy.fft.next_fast_len, cached and uncached."""
    params = [
        [12, 13,          # small ones
         1021, 1024,      # 2 ** 10 and a prime
         16381, 16384,    # 2 ** 14 and a prime
         262139, 262144,  # 2 ** 17 and a prime
         999983, 1048576,  # 2 ** 20 and a prime
         ],
    ]
    param_names = ['size']

    def setup(self, size):
        # Skip the whole benchmark when scipy.fft is unavailable.
        if not has_scipy_fft:
            raise NotImplementedError

    def time_next_fast_len(self, size):
        # __wrapped__ bypasses the lru_cache to time the raw computation.
        scipy_fft.next_fast_len.__wrapped__(size)

    def time_next_fast_len_cached(self, size):
        scipy_fft.next_fast_len(size)
class RFft(Benchmark):
    """Time real-input rfft/irfft across sizes and provider modules."""
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'module']

    def setup(self, size, module):
        self.x = random([size]).astype(double)
        module = get_module(module)
        self.rfft = getattr(module, 'rfft')
        self.irfft = getattr(module, 'irfft')
        # Precompute the forward transform so irfft gets a valid input.
        self.y = self.rfft(self.x)

    def time_rfft(self, size, module):
        self.rfft(self.x)

    def time_irfft(self, size, module):
        self.irfft(self.y)
class RealTransforms1D(Benchmark):
    """Time 1-D dct/dst of types I-IV for scipy.fftpack vs scipy.fft."""
    params = [
        [75, 100, 135, 256, 313, 512, 675, 1024, 2025, 2048],
        ['I', 'II', 'III', 'IV'],
        ['scipy.fftpack', 'scipy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, type, module):
        module = get_module(module)
        self.dct = getattr(module, 'dct')
        self.dst = getattr(module, 'dst')
        self.type = {'I': 1, 'II': 2, 'III': 3, 'IV': 4}[type]

        # The "logical" transform size should be smooth, which for dct/dst
        # type 1 is offset by -1/+1 respectively
        if self.type == 1:
            size += 1
        self.x = random([size]).astype(double)
        if self.type == 1:
            self.x_dst = self.x[:-2].copy()

    def time_dct(self, size, type, module):
        self.dct(self.x, self.type)

    def time_dst(self, size, type, module):
        # DST-I needs the shortened input prepared in setup.
        x = self.x if self.type != 1 else self.x_dst
        self.dst(x, self.type)
class Fftn(Benchmark):
    """Time n-dimensional fftn across shapes, dtypes and providers."""
    params = [
        ["100x100", "313x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # Parse "AxB" into a concrete list of dimensions.
        size = list(map(int, size.split("x")))

        if cmplx != 'cmplx':
            self.x = random(size).astype(double)
        else:
            self.x = random(size).astype(cdouble)+random(size).astype(cdouble)*1j

        self.fftn = getattr(get_module(module), 'fftn')

    def time_fftn(self, size, cmplx, module):
        self.fftn(self.x)
class RealTransformsND(Benchmark):
    """Time n-D dctn/dstn of types I-IV for scipy.fftpack vs scipy.fft."""
    params = [
        ['75x75', '100x100', '135x135', '313x363', '1000x100', '256x256'],
        ['I', 'II', 'III', 'IV'],
        ['scipy.fftpack', 'scipy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, type, module):
        self.dctn = getattr(get_module(module), 'dctn')
        self.dstn = getattr(get_module(module), 'dstn')
        self.type = {'I': 1, 'II': 2, 'III': 3, 'IV': 4}[type]

        # The "logical" transform size should be smooth, which for dct/dst
        # type 1 is offset by -1/+1 respectively
        size = list(map(int, size.split('x')))
        if self.type == 1:
            # Generator is consumed exactly once by random() below.
            size = (s + 1 for s in size)

        self.x = random(size).astype(double)
        if self.type == 1:
            self.x_dst = self.x[:-2, :-2].copy()

    def time_dctn(self, size, type, module):
        self.dctn(self.x, self.type)

    def time_dstn(self, size, type, module):
        # DST-I needs the shortened input prepared in setup.
        x = self.x if self.type != 1 else self.x_dst
        self.dstn(x, self.type)
class FftBackends(Benchmark):
    """Time scipy.fft's fft/ifft under different uarray backends."""
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['pocketfft', 'pyfftw', 'numpy', 'direct']
    ]
    # NOTE: the time_* methods name the third parameter `module`, but asv
    # passes params positionally, so it receives the backend value.
    param_names = ['size', 'type', 'backend']

    def setup(self, size, cmplx, backend):
        import scipy.fft

        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)

        self.fft = scipy.fft.fft
        self.ifft = scipy.fft.ifft

        # Install the backend globally; 'direct' bypasses dispatch
        # entirely by calling the pocketfft implementation functions.
        if backend == 'pocketfft':
            scipy.fft.set_global_backend('scipy')
        elif backend == 'pyfftw':
            if not has_pyfftw:
                raise NotImplementedError
            scipy.fft.set_global_backend(PyfftwBackend)
        elif backend == 'numpy':
            from scipy.fft._debug_backends import NumPyBackend
            scipy.fft.set_global_backend(NumPyBackend)
        elif backend == 'direct':
            import scipy.fft._pocketfft
            self.fft = scipy.fft._pocketfft.fft
            self.ifft = scipy.fft._pocketfft.ifft

    def time_fft(self, size, cmplx, module):
        self.fft(self.x)

    def time_ifft(self, size, cmplx, module):
        self.ifft(self.x)
class FftnBackends(Benchmark):
    """Time scipy.fft's fftn/ifftn under different uarray backends."""
    params = [
        ["100x100", "313x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['pocketfft', 'pyfftw', 'numpy', 'direct']
    ]
    # NOTE: the time_* methods name the third parameter `module`, but asv
    # passes params positionally, so it receives the backend value.
    param_names = ['size', 'type', 'backend']

    def setup(self, size, cmplx, backend):
        import scipy.fft
        size = list(map(int, size.split("x")))

        if cmplx == 'cmplx':
            self.x = random(size).astype(double)+random(size).astype(double)*1j
        else:
            self.x = random(size).astype(double)

        self.fftn = scipy.fft.fftn
        self.ifftn = scipy.fft.ifftn

        # Install the backend globally; 'direct' bypasses dispatch
        # entirely by calling the pocketfft implementation functions.
        if backend == 'pocketfft':
            scipy.fft.set_global_backend('scipy')
        elif backend == 'pyfftw':
            if not has_pyfftw:
                raise NotImplementedError
            scipy.fft.set_global_backend(PyfftwBackend)
        elif backend == 'numpy':
            from scipy.fft._debug_backends import NumPyBackend
            scipy.fft.set_global_backend(NumPyBackend)
        elif backend == 'direct':
            import scipy.fft._pocketfft
            self.fftn = scipy.fft._pocketfft.fftn
            self.ifftn = scipy.fft._pocketfft.ifftn

    def time_fft(self, size, cmplx, module):
        self.fftn(self.x)

    def time_ifft(self, size, cmplx, module):
        self.ifftn(self.x)
class FftThreading(Benchmark):
    """Compare scipy.fft's workers= parallelism against a thread pool."""
    params = [
        ['100x100', '1000x100', '256x256', '512x512'],
        [1, 8, 32, 100],
        ['workers', 'threading']
    ]
    param_names = ['size', 'num_transforms', 'method']

    def setup(self, size, num_transforms, method):
        if not has_scipy_fft:
            raise NotImplementedError

        size = list(map(int, size.split("x")))
        # One complex input array per transform to be run.
        self.xs = [(random(size)+1j*random(size)).astype(np.complex128)
                   for _ in range(num_transforms)]

        if method == 'threading':
            # NOTE(review): the executor is never shut down; asv re-runs
            # setup per benchmark, so pools accumulate for the process
            # lifetime — confirm this is acceptable.
            self.pool = futures.ThreadPoolExecutor(os.cpu_count())

    def map_thread(self, func):
        # Submit one transform per array and block until all complete.
        f = []
        for x in self.xs:
            f.append(self.pool.submit(func, x))
        futures.wait(f)

    def time_fft(self, size, num_transforms, method):
        if method == 'threading':
            self.map_thread(scipy_fft.fft)
        else:
            for x in self.xs:
                scipy_fft.fft(x, workers=-1)

    def time_fftn(self, size, num_transforms, method):
        if method == 'threading':
            self.map_thread(scipy_fft.fftn)
        else:
            for x in self.xs:
                scipy_fft.fftn(x, workers=-1)

View File

@ -0,0 +1,112 @@
""" Test functions for fftpack.basic module
"""
from __future__ import division, absolute_import, print_function
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
import numpy.fft
from numpy.random import rand
try:
from scipy.fftpack import ifft, fft, fftn, irfft, rfft
except ImportError:
pass
from .common import Benchmark
def random(size):
    """Return a uniform random array whose shape is the sequence *size*."""
    return rand(*size)
def direct_dft(x):
    """Naive O(n^2) reference DFT (same convention as numpy.fft.fft)."""
    x = asarray(x)
    n = len(x)
    y = zeros(n, dtype=cdouble)
    # One column of the DFT matrix exponent per output bin.
    w = -arange(n)*(2j*pi/n)
    for i in range(n):
        y[i] = dot(exp(i*w), x)
    return y
def direct_idft(x):
    """Naive O(n^2) inverse DFT (same convention as numpy.fft.ifft)."""
    x = asarray(x)
    n = len(x)
    y = zeros(n, dtype=cdouble)
    # Forward kernel with the sign flipped, plus 1/n normalization.
    w = arange(n)*(2j*pi/n)
    for i in range(n):
        y[i] = dot(exp(i*w), x)/n
    return y
class Fft(Benchmark):
    """Time 1-D fft/ifft for scipy.fftpack vs numpy.fft."""
    params = [
        [100, 256, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['scipy', 'numpy']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # Build a real or complex input of the requested length.
        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)

    def time_fft(self, size, cmplx, module):
        if module == 'numpy':
            numpy.fft.fft(self.x)
        else:
            fft(self.x)

    def time_ifft(self, size, cmplx, module):
        if module == 'numpy':
            numpy.fft.ifft(self.x)
        else:
            ifft(self.x)
class RFft(Benchmark):
    """Time real-input rfft/irfft for scipy.fftpack vs numpy.fft."""
    params = [
        [100, 256, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['scipy', 'numpy']
    ]
    param_names = ['size', 'module']

    def setup(self, size, module):
        self.x = random([size]).astype(double)

    def time_rfft(self, size, module):
        if module == 'numpy':
            numpy.fft.rfft(self.x)
        else:
            rfft(self.x)

    def time_irfft(self, size, module):
        # NOTE(review): self.x is the time-domain signal, not a spectrum;
        # scipy.fftpack.irfft accepts it, and for numpy this measures
        # irfft on real input — presumably intentional for timing only.
        if module == 'numpy':
            numpy.fft.irfft(self.x)
        else:
            irfft(self.x)
class Fftn(Benchmark):
    """Time n-dimensional fftn for scipy.fftpack vs numpy.fft."""
    params = [
        ["100x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['scipy', 'numpy']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # BUG FIX: `map` returns a one-shot iterator in Python 3; the
        # second random(size) call in the cmplx branch saw it exhausted
        # and produced a scalar, so the imaginary part collapsed to a
        # single broadcast value.  Materialize the dims as a list.
        size = list(map(int, size.split("x")))
        if cmplx != 'cmplx':
            self.x = random(size).astype(double)
        else:
            self.x = random(size).astype(cdouble)+random(size).astype(cdouble)*1j

    def time_fftn(self, size, cmplx, module):
        if module == 'numpy':
            numpy.fft.fftn(self.x)
        else:
            fftn(self.x)

View File

@ -1,10 +1,15 @@
""" Benchmark functions for fftpack.pseudo_diffs module
"""
from numpy import arange, sin, cos, pi, exp, tanh, sign
from .common import Benchmark, safe_import
from __future__ import division, absolute_import, print_function
with safe_import():
from numpy import arange, sin, cos, pi, exp, tanh, sign
try:
from scipy.fftpack import diff, fft, ifft, tilbert, hilbert, shift, fftfreq
except ImportError:
pass
from .common import Benchmark
def direct_diff(x, k=1, period=None):

View File

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
"""
==============================================================================
`go_benchmark_functions` -- Problems for testing global optimization routines
@ -16,20 +18,20 @@ References
.. [1] Momin Jamil and Xin-She Yang, A literature survey of benchmark
functions for global optimization problems, Int. Journal of Mathematical
Modelling and Numerical Optimisation, Vol. 4, No. 2, pp. 150--194 (2013).
https://arxiv.org/abs/1308.4008v1
http://arxiv.org/pdf/1308.4008v1.pdf
(and references contained within)
.. [2] http://infinity77.net/global_optimization/
.. [2] http://infinity77.net/global_optimization/index.html
.. [3] S. K. Mishra, Global Optimization By Differential Evolution and
Particle Swarm Methods: Evaluation On Some Benchmark Functions, Munich
Research Papers in Economics
.. [4] E. P. Adorio, U. P. Dilman, MVF - Multivariate Test Function Library
in C for Unconstrained Global Optimization Methods, [Available Online]:
https://www.geocities.ws/eadorio/mvf.pdf
http://www.geocities.ws/eadorio/mvf.pdf
.. [5] S. K. Mishra, Some New Test Functions For Global Optimization And
Performance of Repulsive Particle Swarm Method, [Available Online]:
https://mpra.ub.uni-muenchen.de/2718/
http://mpra.ub.uni-muenchen.de/2718/
.. [6] NIST StRD Nonlinear Regression Problems, retrieved on 1 Oct, 2014
https://www.itl.nist.gov/div898/strd/nls/nls_main.shtml
http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml
"""

View File

@ -1,10 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import abs, asarray
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, seterr, sign, sin, sqrt, sum, where,
zeros, tan, tanh, dot)
from ..common import safe_import # noqa:F401
try:
from scipy.special import factorial
except ImportError:
pass
class Benchmark:
class Benchmark(object):
"""
Defines a global optimization benchmark problem.
@ -41,15 +49,14 @@ class Benchmark:
custom_bounds : sequence
a list of tuples that contain lower/upper bounds for use in plotting.
"""
change_dimensionality = False
def __init__(self, dimensions):
"""
Initialises the problem
Parameters
----------
dimensions : int
The dimensionality of the problem
"""
@ -58,10 +65,11 @@ class Benchmark:
self.nfev = 0
self.fglob = np.nan
self.global_optimum = None
self.change_dimensionality = False
self.custom_bounds = None
def __str__(self):
return f'{self.__class__.__name__} ({self.N} dimensions)'
return '{0} ({1} dimensions)'.format(self.__class__.__name__, self.N)
def __repr__(self):
return self.__class__.__name__
@ -103,10 +111,9 @@ class Benchmark:
return True
# the solution should still be in bounds, otherwise immediate fail.
bounds = np.asarray(self.bounds, dtype=np.float64)
if np.any(x > bounds[:, 1]):
if np.any(x > np.asfarray(self.bounds)[:, 1]):
return False
if np.any(x < bounds[:, 0]):
if np.any(x < np.asfarray(self.bounds)[:, 0]):
return False
# you found a lower global minimum. This shouldn't happen.
@ -169,9 +176,9 @@ class Benchmark:
@property
def N(self):
"""
"""
The dimensionality of the problem.
Returns
-------
N : int

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, cos, exp, pi, prod, sin, sqrt, sum
from .go_benchmark import Benchmark
@ -28,7 +31,6 @@ class Ackley01(Benchmark):
TODO: the -0.2 factor in the exponent of the first term is given as
-0.02 in Jamil et al.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -36,6 +38,7 @@ class Ackley01(Benchmark):
self._bounds = list(zip([-35.0] * self.N, [35.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -178,7 +181,6 @@ class Alpine01(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -186,6 +188,7 @@ class Alpine01(Benchmark):
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -218,7 +221,6 @@ class Alpine02(Benchmark):
TODO: eqn 7 in [1]_ has the wrong global minimum value.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -226,6 +228,7 @@ class Alpine02(Benchmark):
self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))
self.global_optimum = [[7.91705268, 4.81584232]]
self.fglob = -6.12950
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -257,7 +260,6 @@ class AMGM(Benchmark):
TODO: eqn 7 in [1]_ has the wrong global minimum value.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -265,6 +267,7 @@ class AMGM(Benchmark):
self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))
self.global_optimum = [[1, 1]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,4 +1,7 @@
from numpy import abs, cos, exp, log, arange, pi, sin, sqrt, sum
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, cos, exp, log, arange, pi, roll, sin, sqrt, sum
from .go_benchmark import Benchmark
@ -621,7 +624,6 @@ class Brown(Benchmark):
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -631,6 +633,7 @@ class Brown(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, asarray, cos, exp, floor, pi, sign, sin, sqrt, sum,
size, tril, isnan, atleast_2d, repeat)
@ -7,7 +10,8 @@ from .go_benchmark import Benchmark
class CarromTable(Benchmark):
r"""
"""
CarromTable objective function.
The CarromTable [1]_ global optimization problem is a multimodal
@ -49,7 +53,8 @@ class CarromTable(Benchmark):
class Chichinadze(Benchmark):
r"""
"""
Chichinadze objective function.
This class defines the Chichinadze [1]_ global optimization problem. This is a
@ -96,7 +101,8 @@ class Chichinadze(Benchmark):
class Cigar(Benchmark):
r"""
"""
Cigar objective function.
This class defines the Cigar [1]_ global optimization problem. This
@ -115,7 +121,6 @@ class Cigar(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -126,6 +131,7 @@ class Cigar(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -134,7 +140,8 @@ class Cigar(Benchmark):
class Cola(Benchmark):
r"""
"""
Cola objective function.
This class defines the Cola global optimization problem. The 17-dimensional
@ -213,13 +220,14 @@ class Cola(Benchmark):
yj = repeat(yi, size(yi, 1), axis=0)
yi = yi.T
inner = (sqrt((xi - xj) ** 2 + (yi - yj) ** 2) - self.d) ** 2
inner = (sqrt(((xi - xj) ** 2 + (yi - yj) ** 2)) - self.d) ** 2
inner = tril(inner, -1)
return sum(sum(inner, axis=1))
class Colville(Benchmark):
r"""
"""
Colville objective function.
This class defines the Colville global optimization problem. This
@ -265,7 +273,8 @@ class Colville(Benchmark):
class Corana(Benchmark):
r"""
"""
Corana objective function.
This class defines the Corana [1]_ global optimization problem. This
@ -317,7 +326,8 @@ class Corana(Benchmark):
class CosineMixture(Benchmark):
r"""
"""
Cosine Mixture objective function.
This class defines the Cosine Mixture global optimization problem. This
@ -326,7 +336,7 @@ class CosineMixture(Benchmark):
.. math::
f_{\text{CosineMixture}}(x) = -0.1 \sum_{i=1}^n \cos(5 \pi x_i)
+ \sum_{i=1}^n x_i^2
- \sum_{i=1}^n x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
@ -335,28 +345,33 @@ class CosineMixture(Benchmark):
*Global optimum*: :math:`f(x) = -0.1N` for :math:`x_i = 0` for
:math:`i = 1, ..., N`
.. [1] Ali, M.M, Khompatraporn, C. , Zabinski, B. A Numerical Evaluation
of Several Stochastic Algorithms on Selected Continuous Global
Optimization Test Problems, Journal of Global Optimization, 2005, 31, 635
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO, Jamil #38 has wrong minimum and wrong fglob. I plotted it.
-(x**2) term is always negative if x is negative.
cos(5 * pi * x) is equal to -1 for x=-1.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-1.0, 1.0)] * self.N
self.change_dimensionality = True
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0. for _ in range(self.N)]]
self.fglob = -0.1 * self.N
self.global_optimum = [[-1. for _ in range(self.N)]]
self.fglob = -0.9 * self.N
def fun(self, x, *args):
self.nfev += 1
return -0.1 * sum(cos(5.0 * pi * x)) + sum(x ** 2.0)
return -0.1 * sum(cos(5.0 * pi * x)) - sum(x ** 2.0)
class CrossInTray(Benchmark):
r"""
"""
Cross-in-Tray objective function.
This class defines the Cross-in-Tray [1]_ global optimization problem. This is a
@ -371,7 +386,7 @@ class CrossInTray(Benchmark):
with :math:`x_i \in [-15, 15]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -2.062611870822739` for :math:`x_i =
*Global optimum*: :math:`f(x) = -2.062611870822739` for :math:`x_i =
\pm 1.349406608602084` for :math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
@ -399,7 +414,8 @@ class CrossInTray(Benchmark):
class CrossLegTable(Benchmark):
r"""
"""
Cross-Leg-Table objective function.
This class defines the Cross-Leg-Table [1]_ global optimization problem. This
@ -408,7 +424,7 @@ class CrossLegTable(Benchmark):
.. math::
f_{\text{CrossLegTable}}(x) = - \frac{1}{\left(\left|{e^{\left|{100
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|}
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|}
\sin\left(x_{1}\right) \sin\left(x_{2}\right)}\right| + 1\right)^{0.1}}
@ -439,7 +455,8 @@ class CrossLegTable(Benchmark):
class CrownedCross(Benchmark):
r"""
"""
Crowned Cross objective function.
This class defines the Crowned Cross [1]_ global optimization problem. This
@ -479,7 +496,8 @@ class CrownedCross(Benchmark):
class Csendes(Benchmark):
r"""
"""
Csendes objective function.
This class defines the Csendes [1]_ global optimization problem. This is a
@ -501,11 +519,11 @@ class Csendes(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
@ -536,7 +554,8 @@ class Csendes(Benchmark):
class Cube(Benchmark):
r"""
"""
Cube objective function.
This class defines the Cube global optimization problem. This
@ -547,8 +566,7 @@ class Cube(Benchmark):
f_{\text{Cube}}(x) = 100(x_2 - x_1^3)^2 + (1 - x1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]`
for :math:`i=1,...,N`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,N`.
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [1, 1]`

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import abs, cos, exp, arange, pi, sin, sqrt, sum, zeros, tanh
from numpy.testing import assert_almost_equal
@ -5,7 +8,8 @@ from .go_benchmark import Benchmark
class Damavandi(Benchmark):
r"""
"""
Damavandi objective function.
This class defines the Damavandi [1]_ global optimization problem. This is a
@ -64,7 +68,8 @@ class Damavandi(Benchmark):
class Deb01(Benchmark):
r"""
"""
Deb 1 objective function.
This class defines the Deb 1 [1]_ global optimization problem. This is a
@ -86,11 +91,12 @@ class Deb01(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.3, -0.3]]
@ -102,7 +108,8 @@ class Deb01(Benchmark):
class Deb03(Benchmark):
r"""
"""
Deb 3 objective function.
This class defines the Deb 3 [1]_ global optimization problem. This is a
@ -110,7 +117,7 @@ class Deb03(Benchmark):
.. math::
f_{\text{Deb03}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6 \left[ 5 \pi
f_{\text{Deb02}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6 \left[ 5 \pi
\left ( x_i^{3/4} - 0.05 \right) \right ]
@ -125,13 +132,13 @@ class Deb03(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
# lower limit changed to zero because of fractional power
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.change_dimensionality = True
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.93388314, 0.68141781]]
self.fglob = -1.0
@ -143,7 +150,8 @@ class Deb03(Benchmark):
class Decanomial(Benchmark):
r"""
"""
Decanomial objective function.
This class defines the Decanomial function global optimization problem. This
@ -186,7 +194,8 @@ class Decanomial(Benchmark):
class Deceptive(Benchmark):
r"""
"""
Deceptive objective function.
This class defines the Deceptive [1]_ global optimization problem. This is a
@ -227,7 +236,6 @@ class Deceptive(Benchmark):
is based on his code. His code and the website don't match, the equations
are wrong.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -238,6 +246,7 @@ class Deceptive(Benchmark):
self.global_optimum = [alpha]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -265,7 +274,8 @@ class Deceptive(Benchmark):
class DeckkersAarts(Benchmark):
r"""
"""
Deckkers-Aarts objective function.
This class defines the Deckkers-Aarts [1]_ global optimization problem. This
@ -305,7 +315,8 @@ class DeckkersAarts(Benchmark):
class DeflectedCorrugatedSpring(Benchmark):
r"""
"""
DeflectedCorrugatedSpring objective function.
This class defines the Deflected Corrugated Spring [1]_ function global
@ -333,7 +344,6 @@ class DeflectedCorrugatedSpring(Benchmark):
below is different to the equation above. Also, the global minimum is
wrong.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -343,6 +353,7 @@ class DeflectedCorrugatedSpring(Benchmark):
self.global_optimum = [[alpha for _ in range(self.N)]]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -353,7 +364,8 @@ class DeflectedCorrugatedSpring(Benchmark):
class DeVilliersGlasser01(Benchmark):
r"""
"""
DeVilliers-Glasser 1 objective function.
This class defines the DeVilliers-Glasser 1 [1]_ function global optimization
@ -398,11 +410,12 @@ class DeVilliersGlasser01(Benchmark):
class DeVilliersGlasser02(Benchmark):
r"""
"""
DeVilliers-Glasser 2 objective function.
This class defines the DeVilliers-Glasser 2 [1]_ function global optimization
problem. This is a multimodal minimization problem defined as follows:
This class defines the DeVilliers-Glasser 2 [1]_ function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
@ -445,7 +458,8 @@ class DeVilliersGlasser02(Benchmark):
class DixonPrice(Benchmark):
r"""
"""
Dixon and Price objective function.
This class defines the Dixon and Price global optimization problem. This
@ -469,7 +483,6 @@ class DixonPrice(Benchmark):
TODO: Gavana code not correct. i array should start from 2.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -480,6 +493,7 @@ class DixonPrice(Benchmark):
self.global_optimum = [[2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i)
for i in range(1, self.N + 1)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -490,7 +504,8 @@ class DixonPrice(Benchmark):
class Dolan(Benchmark):
r"""
"""
Dolan objective function.
This class defines the Dolan [1]_ global optimization problem. This
@ -504,7 +519,7 @@ class Dolan(Benchmark):
with :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., 5`.
*Global optimum*: :math:`f(x_i) = 10^{-5}` for
*Global optimum*: :math:`f(x_i) = 10^{-5}` for
:math:`x = [8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
@ -532,7 +547,8 @@ class Dolan(Benchmark):
class DropWave(Benchmark):
r"""
"""
DropWave objective function.
This class defines the DropWave [1]_ global optimization problem. This is a

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, asarray, cos, exp, arange, pi, sin, sqrt, sum
from .go_benchmark import Benchmark
@ -27,8 +30,7 @@ class Easom(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO Gavana website disagrees with Jamil, etc.
Gavana equation in docstring is totally wrong.
TODO Gavana website disagrees with Jamil, etc. Gavana equation in docstring is totally wrong.
"""
def __init__(self, dimensions=2):
@ -52,7 +54,7 @@ class Eckerle4(Benchmark):
Eckerle, K., NIST (1979).
Circular Interference Transmittance Study.
..[1] https://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml
..[1] http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml
#TODO, this is a NIST regression standard dataset, docstring needs
improving
@ -162,7 +164,6 @@ class EggHolder(Benchmark):
TODO: Jamil is missing a minus sign on the fglob value
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -172,6 +173,7 @@ class EggHolder(Benchmark):
self.global_optimum = [[512.0, 404.2319]]
self.fglob = -959.640662711
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -288,7 +290,6 @@ class Exponential(Benchmark):
TODO Jamil are missing a minus sign on fglob
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -297,6 +298,7 @@ class Exponential(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from .go_benchmark import Benchmark

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import abs, sin, cos, exp, floor, log, arange, prod, sqrt, sum
@ -153,7 +156,6 @@ class Griewank(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -164,6 +166,7 @@ class Griewank(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,5 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import abs, arctan2, asarray, cos, exp, arange, pi, sin, sqrt, sum
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, sign, sin, sqrt, sum, tan, tanh)
from .go_benchmark import Benchmark
@ -229,7 +233,7 @@ class HelicalValley(Benchmark):
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 0, 0]`
.. [1] Fletcher, R. & Powell, M. A Rapidly Convergent Descent Method for
Minimization, Computer Journal, 1963, 62, 163-168
Minimzation, Computer Journal, 1963, 62, 163-168
TODO: Jamil equation is different to original reference. The above paper
can be obtained from

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import sin, sum
from .go_benchmark import Benchmark
@ -24,7 +27,6 @@ class Infinity(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -33,6 +35,7 @@ class Infinity(Benchmark):
self.global_optimum = [[1e-16 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,8 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import sum, asarray, arange, exp
from .go_benchmark import Benchmark
class JennrichSampson(Benchmark):
r"""
Jennrich-Sampson objective function.
@ -42,7 +46,8 @@ class JennrichSampson(Benchmark):
class Judge(Benchmark):
r"""
"""
Judge objective function.
This class defines the Judge [1]_ global optimization problem. This
@ -50,7 +55,7 @@ class Judge(Benchmark):
.. math::
f_{\text{Judge}}(x) = \sum_{i=1}^{20}
f_{\text{Judge}}(x) = \sum_{i=1}^{20}
\left [ \left (x_1 + A_i x_2 + B x_2^2 \right ) - C_i \right ]^2

View File

@ -1,4 +1,7 @@
from numpy import asarray, atleast_2d, arange, sin, sqrt, prod, sum, round
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import asarray, atleast_2d, floor, arange, sin, sqrt, prod, sum, round
from .go_benchmark import Benchmark
@ -31,7 +34,6 @@ class Katsuura(Benchmark):
TODO: Adorio has wrong global minimum. Adorio uses round, Gavana docstring
uses floor, but Gavana code uses round. We'll use round...
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -41,6 +43,7 @@ class Katsuura(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.custom_bounds = [(0, 1), (0, 1)]
self.fglob = 1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -125,7 +128,7 @@ class Kowalik(Benchmark):
*Global optimum*: :math:`f(x) = 0.00030748610` for :math:`x =
[0.192833, 0.190836, 0.123117, 0.135766]`.
..[1] https://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml
..[1] http://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml
"""
def __init__(self, dimensions=4):

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import sum, cos, exp, pi, arange, sin
from .go_benchmark import Benchmark
@ -105,7 +108,6 @@ class LennardJones(Benchmark):
"""
change_dimensionality = True
def __init__(self, dimensions=6):
# dimensions is in [6:60]
@ -127,6 +129,7 @@ class LennardJones(Benchmark):
k = int(dimensions / 3)
self.fglob = self.minima[k - 2]
self.change_dimensionality = True
def change_dimensions(self, ndim):
if ndim not in range(6, 61):
@ -203,8 +206,7 @@ class Levy03(Benchmark):
.. math::
f_{\text{Levy03}}(\mathbf{x}) =
\sin^2(\pi y_1)+\sum_{i=1}^{n-1}(y_i-1)^2[1+10\sin^2(\pi y_{i+1})]+(y_n-1)^2
f_{\text{Levy03}}(\mathbf{x}) = \sin^2(\pi y_1)+\sum_{i=1}^{n-1}(y_i-1)^2[1+10\sin^2(\pi y_{i+1})]+(y_n-1)^2
Where, in this exercise:
@ -213,8 +215,7 @@ class Levy03(Benchmark):
y_i=1+\frac{x_i-1}{4}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
@ -256,15 +257,11 @@ class Levy05(Benchmark):
.. math::
f_{\text{Levy05}}(\mathbf{x}) =
\sum_{i=1}^{5} i \cos \left[(i-1)x_1 + i \right] \times \sum_{j=1}^{5} j
\cos \left[(j+1)x_2 + j \right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2
f_{\text{Levy05}}(\mathbf{x}) = \sum_{i=1}^{5} i \cos \left[(i-1)x_1 + i \right] \times \sum_{j=1}^{5} j \cos \left[(j+1)x_2 + j \right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = -176.1375779` for
:math:`\mathbf{x} = [-1.30685, -1.42485]`.
*Global optimum*: :math:`f(x_i) = -176.1375779` for :math:`\mathbf{x} = [-1.30685, -1.42485]`.
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.

View File

@ -1,9 +1,15 @@
from numpy import (abs, asarray, cos, exp, log, arange, pi, prod, sin, sqrt,
sum, tan)
from .go_benchmark import Benchmark, safe_import
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
with safe_import():
import numpy as np
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, sign, sin, sqrt, sum, where,
zeros, tan, tanh, dot)
try:
from scipy.special import factorial
except ImportError:
pass
from .go_benchmark import Benchmark
class Matyas(Benchmark):
@ -85,7 +91,7 @@ class Meyer(Benchmark):
r"""
Meyer [1]_ objective function.
..[1] https://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml
..[1] http://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml
TODO NIST regression standard
"""
@ -206,7 +212,7 @@ class Mishra01(Benchmark):
f_{\text{Mishra01}}(x) = (1 + x_n)^{x_n}
where
.. math::
@ -223,7 +229,6 @@ class Mishra01(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -233,6 +238,7 @@ class Mishra01(Benchmark):
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 2.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -252,26 +258,25 @@ class Mishra02(Benchmark):
.. math::
f_{\text{Mishra02}}({x}) = (1 + x_n)^{x_n}
with
.. math::
x_n = n - \sum_{i=1}^{n-1} \frac{(x_i + x_{i+1})}{2}
Here, :math:`n` represents the number of dimensions and
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1`
*Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1`
for :math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -281,6 +286,7 @@ class Mishra02(Benchmark):
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 2.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -299,13 +305,13 @@ class Mishra03(Benchmark):
.. math::
f_{\text{Mishra03}}(x) = \sqrt{\lvert \cos{\sqrt{\lvert x_1^2
f_{\text{Mishra03}}(x) = \sqrt{\lvert \cos{\sqrt{\lvert x_1^2
+ x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.1999` for
*Global optimum*: :math:`f(x) = -0.1999` for
:math:`x = [-9.99378322, -9.99918927]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
@ -327,8 +333,8 @@ class Mishra03(Benchmark):
def fun(self, x, *args):
self.nfev += 1
return (0.01 * (x[0] + x[1])
+ sqrt(abs(cos(sqrt(abs(x[0] ** 2 + x[1] ** 2))))))
return ((0.01 * (x[0] + x[1])
+ sqrt(abs(cos(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))))
class Mishra04(Benchmark):
@ -367,8 +373,8 @@ class Mishra04(Benchmark):
def fun(self, x, *args):
self.nfev += 1
return (0.01 * (x[0] + x[1])
+ sqrt(abs(sin(sqrt(abs(x[0] ** 2 + x[1] ** 2))))))
return ((0.01 * (x[0] + x[1])
+ sqrt(abs(sin(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))))
class Mishra05(Benchmark):
@ -471,14 +477,13 @@ class Mishra07(Benchmark):
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = \sqrt{n}`
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = \sqrt{n}`
for :math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -488,6 +493,7 @@ class Mishra07(Benchmark):
self.global_optimum = [[sqrt(self.N)
for i in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -606,7 +612,7 @@ class Mishra10(Benchmark):
.. math::
TODO - int(x) should be used instead of floor(x)!!!!!
f_{\text{Mishra10}}({x}) = \left[ \lfloor x_1 \perp x_2 \rfloor -
f_{\text{Mishra10}}({x}) = \left[ \lfloor x_1 \perp x_2 \rfloor -
\lfloor x_1 \rfloor - \lfloor x_2 \rfloor \right]^2
with :math:`x_i \in [-10, 10]` for :math:`i =1, 2`.
@ -661,7 +667,6 @@ class Mishra11(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -671,6 +676,7 @@ class Mishra11(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -689,7 +695,7 @@ class MultiModal(Benchmark):
.. math::
f_{\text{MultiModal}}(x) = \left( \sum_{i=1}^n \lvert x_i \rvert
f_{\text{MultiModal}}(x) = \left( \sum_{i=1}^n \lvert x_i \rvert
\right) \left( \prod_{i=1}^n \lvert x_i \rvert \right)
@ -701,7 +707,6 @@ class MultiModal(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -711,6 +716,7 @@ class MultiModal(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,4 +1,7 @@
from numpy import cos, sqrt, sin, abs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import cos, sqrt, sum, sin, abs
from .go_benchmark import Benchmark
@ -32,7 +35,6 @@ class NeedleEye(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -41,6 +43,7 @@ class NeedleEye(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import sum, cos, exp, pi, asarray
from .go_benchmark import Benchmark

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import (abs, sum, sin, cos, sqrt, log, prod, where, pi, exp, arange,
floor, log10, atleast_2d, zeros)
from .go_benchmark import Benchmark
@ -172,7 +175,6 @@ class Penalty01(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -182,6 +184,7 @@ class Penalty01(Benchmark):
self.global_optimum = [[-1.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -234,7 +237,6 @@ class Penalty02(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -244,6 +246,7 @@ class Penalty02(Benchmark):
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -326,7 +329,6 @@ class PermFunction01(Benchmark):
TODO: line 560
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -336,6 +338,7 @@ class PermFunction01(Benchmark):
self.global_optimum = [list(range(1, self.N + 1))]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -344,7 +347,7 @@ class PermFunction01(Benchmark):
k = atleast_2d(arange(self.N) + 1).T
j = atleast_2d(arange(self.N) + 1)
s = (j ** k + b) * ((x / j) ** k - 1)
return sum(sum(s, axis=1) ** 2)
return sum((sum(s, axis=1) ** 2))
class PermFunction02(Benchmark):
@ -374,7 +377,6 @@ class PermFunction02(Benchmark):
TODO: line 582
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -385,6 +387,7 @@ class PermFunction02(Benchmark):
self.global_optimum = [1. / arange(1, self.N + 1)]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -393,7 +396,7 @@ class PermFunction02(Benchmark):
k = atleast_2d(arange(self.N) + 1).T
j = atleast_2d(arange(self.N) + 1)
s = (j + b) * (x ** k - (1. / j) ** k)
return sum(sum(s, axis=1) ** 2)
return sum((sum(s, axis=1) ** 2))
class Pinter(Benchmark):
@ -431,7 +434,6 @@ class Pinter(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -440,6 +442,7 @@ class Pinter(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -477,7 +480,6 @@ class Plateau(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -486,6 +488,7 @@ class Plateau(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 30.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,10 +1,14 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, arange, sqrt
from .go_benchmark import Benchmark
class Qing(Benchmark):
r"""
"""
Qing objective function.
This class defines the Qing [1]_ global optimization problem. This is a
@ -25,7 +29,6 @@ class Qing(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -35,6 +38,7 @@ class Qing(Benchmark):
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [[sqrt(_) for _ in range(1, self.N + 1)]]
self.fglob = 0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -44,7 +48,8 @@ class Qing(Benchmark):
class Quadratic(Benchmark):
r"""
"""
Quadratic objective function.
This class defines the Quadratic [1]_ global optimization problem. This
@ -66,7 +71,6 @@ class Quadratic(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -75,6 +79,7 @@ class Quadratic(Benchmark):
self.custom_bounds = [(0, 1), (0, 1)]
self.global_optimum = [[0.19388, 0.48513]]
self.fglob = -3873.72418
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -84,7 +89,8 @@ class Quadratic(Benchmark):
class Quintic(Benchmark):
r"""
"""
Quintic objective function.
This class defines the Quintic [1]_ global optimization problem. This is a
@ -106,7 +112,6 @@ class Quintic(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -116,6 +121,7 @@ class Quintic(Benchmark):
self.global_optimum = [[-1.0 for _ in range(self.N)]]
self.fglob = 0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sin, cos, asarray, arange, pi, exp, log, sqrt
from scipy.optimize import rosen
from .go_benchmark import Benchmark
@ -32,7 +35,6 @@ class Rana(Benchmark):
TODO: homemade global minimum here.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -42,6 +44,7 @@ class Rana(Benchmark):
self.global_optimum = [[-300.3376, 500.]]
self.fglob = -500.8021602966615
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -73,7 +76,6 @@ class Rastrigin(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -81,6 +83,7 @@ class Rastrigin(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -93,7 +96,7 @@ class Ratkowsky01(Benchmark):
"""
Ratkowsky objective function.
.. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
.. [1] http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
"""
# TODO, this is a NIST regression standard dataset
@ -145,7 +148,7 @@ class Ratkowsky02(Benchmark):
*Global optimum*: :math:`f(x) = 8.0565229338` for
:math:`x = [7.2462237576e1, 2.6180768402, 6.7359200066e-2]`
.. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
.. [1] http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
"""
def __init__(self, dimensions=3):
@ -271,7 +274,6 @@ class Rosenbrock(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -281,6 +283,7 @@ class Rosenbrock(Benchmark):
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,5 +1,9 @@
from numpy import (abs, asarray, cos, floor, arange, pi, prod, roll, sin,
sqrt, sum, repeat, atleast_2d, tril)
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, sign, sin, sqrt, sum,
tan, tanh, dot, repeat, atleast_2d, tril)
from numpy.random import uniform
from .go_benchmark import Benchmark
@ -27,7 +31,6 @@ class Salomon(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -37,6 +40,7 @@ class Salomon(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -69,7 +73,6 @@ class Sargan(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -79,6 +82,7 @@ class Sargan(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -167,7 +171,7 @@ class Schaffer02(Benchmark):
def fun(self, x, *args):
self.nfev += 1
num = sin(x[0] ** 2 - x[1] ** 2) ** 2 - 0.5
num = sin((x[0] ** 2 - x[1] ** 2)) ** 2 - 0.5
den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2
return 0.5 + num / den
@ -314,7 +318,6 @@ class Schwefel01(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -324,6 +327,7 @@ class Schwefel01(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -356,7 +360,6 @@ class Schwefel02(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -366,6 +369,7 @@ class Schwefel02(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -399,7 +403,6 @@ class Schwefel04(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -408,6 +411,7 @@ class Schwefel04(Benchmark):
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -478,7 +482,6 @@ class Schwefel20(Benchmark):
TODO: Jamil #122 is incorrect. There shouldn't be a leading minus sign.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -487,6 +490,7 @@ class Schwefel20(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -518,7 +522,6 @@ class Schwefel21(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -527,6 +530,7 @@ class Schwefel21(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -558,7 +562,6 @@ class Schwefel22(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -568,6 +571,7 @@ class Schwefel22(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -596,7 +600,6 @@ class Schwefel26(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -605,6 +608,7 @@ class Schwefel26(Benchmark):
self.global_optimum = [[420.968746 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -887,7 +891,6 @@ class Shubert01(Benchmark):
TODO: Jamil#133 is missing a prefactor of j before the cos function.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -897,6 +900,8 @@ class Shubert01(Benchmark):
self.fglob = -186.7309
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -929,7 +934,6 @@ class Shubert03(Benchmark):
TODO: Jamil#134 has wrong global minimum value, and is missing a minus sign
before the whole thing.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -939,6 +943,8 @@ class Shubert03(Benchmark):
self.global_optimum = [[5.791794, 5.791794]]
self.fglob = -24.062499
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -971,7 +977,6 @@ class Shubert04(Benchmark):
TODO: Jamil#135 has wrong global minimum value, and is missing a minus sign
before the whole thing.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -981,6 +986,8 @@ class Shubert04(Benchmark):
self.global_optimum = [[-0.80032121, -7.08350592]]
self.fglob = -29.016015
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1014,7 +1021,6 @@ class SineEnvelope(Benchmark):
TODO: Jamil #136
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1025,6 +1031,7 @@ class SineEnvelope(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1096,7 +1103,6 @@ class Sodp(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1105,6 +1111,7 @@ class Sodp(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1137,7 +1144,6 @@ class Sphere(Benchmark):
TODO Jamil has stupid limits
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1145,6 +1151,7 @@ class Sphere(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1175,7 +1182,6 @@ class Step(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1185,6 +1191,7 @@ class Step(Benchmark):
self.global_optimum = [[0. for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1213,7 +1220,6 @@ class Step2(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1223,6 +1229,7 @@ class Step2(Benchmark):
self.global_optimum = [[0.5 for _ in range(self.N)]]
self.fglob = 0.5
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1254,7 +1261,6 @@ class Stochastic(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1263,6 +1269,7 @@ class Stochastic(Benchmark):
self.global_optimum = [[1.0 / _ for _ in range(1, self.N + 1)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1306,7 +1313,6 @@ class StretchedV(Benchmark):
brackets, in others it is outside. In Jamil#142 it's not even 1. Here
we go with the Adorio option.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1315,6 +1321,7 @@ class StretchedV(Benchmark):
self.global_optimum = [[0, 0]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -1346,7 +1353,6 @@ class StyblinskiTang(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -1355,6 +1361,7 @@ class StyblinskiTang(Benchmark):
self.global_optimum = [[-2.903534018185960 for _ in range(self.N)]]
self.fglob = -39.16616570377142 * self.N
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,4 +1,9 @@
from numpy import abs, asarray, cos, exp, arange, pi, sin, sum, atleast_2d
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, sign, sin, sqrt, sum,
tan, tanh, atleast_2d)
from .go_benchmark import Benchmark
@ -51,7 +56,7 @@ class Thurber(Benchmark):
r"""
Thurber [1]_ objective function.
.. [1] https://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml
.. [1] http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml
"""
def __init__(self, dimensions=7):
@ -238,7 +243,6 @@ class Trid(Benchmark):
TODO Jamil#150, starting index of second summation term should be 2.
"""
change_dimensionality = True
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
@ -247,6 +251,7 @@ class Trid(Benchmark):
self.global_optimum = [[6, 10, 12, 12, 10, 6]]
self.fglob = -50.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -282,7 +287,6 @@ class Trigonometric01(Benchmark):
TODO: equaiton uncertain here. Is it just supposed to be the cos term
in the inner sum, or the whole of the second line in Jamil #153.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -291,6 +295,7 @@ class Trigonometric01(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -324,7 +329,6 @@ class Trigonometric02(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -335,6 +339,7 @@ class Trigonometric02(Benchmark):
self.global_optimum = [[0.9 for _ in range(self.N)]]
self.fglob = 1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,4 +1,7 @@
from numpy import abs, sin, cos, pi, sqrt
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sin, cos, pi, sqrt
from .go_benchmark import Benchmark

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import sum, cos, sin, log
from .go_benchmark import Benchmark
@ -68,7 +71,6 @@ class Vincent(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -77,6 +79,7 @@ class Vincent(Benchmark):
self.global_optimum = [[7.70628098 for _ in range(self.N)]]
self.fglob = -float(self.N)
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import atleast_2d, arange, sum, cos, exp, pi
from .go_benchmark import Benchmark
@ -85,7 +88,6 @@ class Wavy(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -94,6 +96,7 @@ class Wavy(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -215,8 +218,6 @@ class Weierstrass(Benchmark):
be included in the outer sum. Mishra code has it right as does the
reference referred to in Jamil#166.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -224,6 +225,7 @@ class Weierstrass(Benchmark):
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -264,7 +266,6 @@ class Whitley(Benchmark):
TODO Jamil#167 has '+ 1' inside the cos term, when it should be outside it.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -275,6 +276,7 @@ class Whitley(Benchmark):
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import abs, sum, sin, cos, pi, exp, arange, prod, sqrt
from .go_benchmark import Benchmark
@ -30,7 +33,6 @@ class XinSheYang01(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -40,6 +42,7 @@ class XinSheYang01(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -72,7 +75,6 @@ class XinSheYang02(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -82,6 +84,7 @@ class XinSheYang02(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -116,7 +119,6 @@ class XinSheYang03(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -125,6 +127,7 @@ class XinSheYang03(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -162,7 +165,6 @@ class XinSheYang04(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -171,6 +173,7 @@ class XinSheYang04(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, cos, pi
from .go_benchmark import Benchmark
@ -21,19 +24,13 @@ class YaoLiu04(Benchmark):
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Yao X., Liu Y. (1997) Fast evolution strategies.
In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds)
Evolutionary Programming VI. EP 1997.
Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg
.. [2] Mishra, S. Global Optimization by Differential Evolution and
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO line 1201. Gavana code and documentation differ.
max(abs(x)) != abs(max(x))
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -42,6 +39,7 @@ class YaoLiu04(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -68,14 +66,10 @@ class YaoLiu09(Benchmark):
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Yao X., Liu Y. (1997) Fast evolution strategies.
In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds)
Evolutionary Programming VI. EP 1997.
Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
.. [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO Yao-Liu Fast Evolutionary programming is the the original ref.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -84,6 +78,7 @@ class YaoLiu09(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1

View File

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sign, arange
from .go_benchmark import Benchmark
@ -27,7 +30,6 @@ class Zacharov(Benchmark):
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -37,6 +39,7 @@ class Zacharov(Benchmark):
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -69,7 +72,6 @@ class ZeroSum(Benchmark):
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
@ -78,6 +80,7 @@ class ZeroSum(Benchmark):
self.global_optimum = [[]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
@ -174,20 +177,13 @@ class Zimmerman(Benchmark):
self.fglob = 0.0
def fun(self, x, *args):
def Zh1(x):
return 9.0 - x[0] - x[1]
def Zh2(x):
return (x[0] - 3.0) ** 2.0 + (x[1] - 2.0) ** 2.0 - 16.0
def Zh3(x):
return x[0] * x[1] - 14.0
def Zp(x):
return 100.0 * (1.0 + x)
self.nfev += 1
Zh1 = lambda x: 9.0 - x[0] - x[1]
Zh2 = lambda x: (x[0] - 3.0) ** 2.0 + (x[1] - 2.0) ** 2.0 - 16.0
Zh3 = lambda x: x[0] * x[1] - 14.0
Zp = lambda x: 100.0 * (1.0 + x)
return max(Zh1(x),
Zp(Zh2(x)) * sign(Zh2(x)),
Zp(Zh3(x)) * sign(Zh3(x)),

View File

@ -1,8 +1,16 @@
from numpy import cos, exp, log, pi, sin, sqrt
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, sign, sin, sqrt, sum, where,
zeros, tan, tanh, dot)
try:
from scipy.misc import factorial
except ImportError:
pass
from .go_benchmark import Benchmark
#-----------------------------------------------------------------------
# UNIVARIATE SINGLE-OBJECTIVE PROBLEMS
#-----------------------------------------------------------------------
@ -215,8 +223,7 @@ class Problem07(Benchmark):
.. math::
f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x
\\right) + \\log(x) - 0.84x + 3
f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) + \\log(x) - 0.84x + 3
Bound constraints: :math:`x \\in [2.7, 7.5]`
@ -572,9 +579,8 @@ class Problem18(Benchmark):
.. math::
f_{\\text{Problem18}}(x)
= \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x
\\leq 3 \\\\ 2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}
f_{\\text{Problem18}}(x) = \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x \\leq 3 \\\\
2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}
Bound constraints: :math:`x \\in [0, 6]`

View File

@ -1,33 +1,33 @@
from __future__ import division, absolute_import, print_function
import numpy as np
from .common import Benchmark, safe_import, is_xslow
from .common import Benchmark
from scipy.integrate import quad, cumulative_simpson, nquad, quad_vec, cubature
from scipy.integrate import quad
from concurrent.futures import ThreadPoolExecutor
from asv_runner.benchmarks.mark import SkipNotImplemented
with safe_import():
try:
import ctypes
import scipy.integrate._test_multivariate as clib_test
from scipy._lib import _ccallback_c
except ImportError:
_ccallback_c = None
with safe_import() as exc:
try:
from scipy import LowLevelCallable
from_cython = LowLevelCallable.from_cython
if exc.error:
def LowLevelCallable(func, data):
return (func, data)
except ImportError:
LowLevelCallable = lambda func, data: (func, data)
from_cython = lambda *a: a
def from_cython(*a):
return a
with safe_import() as exc:
try:
import cffi
if exc.error:
cffi = None # noqa: F811
except ImportError:
cffi = None
with safe_import():
try:
from scipy.integrate import solve_bvp
except ImportError:
pass
class SolveBVP(Benchmark):
@ -105,8 +105,7 @@ class Quad(Benchmark):
voidp = ctypes.cast(self.f_ctypes, ctypes.c_void_p)
address = voidp.value
ffi = cffi.FFI()
self.f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)",
address))
self.f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)", address))
def time_quad_python(self):
quad(self.f_python, 0, np.pi)
@ -119,286 +118,3 @@ class Quad(Benchmark):
def time_quad_cffi(self):
quad(self.f_cffi, 0, np.pi)
class CumulativeSimpson(Benchmark):
def setup(self) -> None:
x, self.dx = np.linspace(0, 5, 1000, retstep=True)
self.y = np.sin(2*np.pi*x)
self.y2 = np.tile(self.y, (100, 100, 1))
def time_1d(self) -> None:
cumulative_simpson(self.y, dx=self.dx)
def time_multid(self) -> None:
cumulative_simpson(self.y2, dx=self.dx)
class NquadSphere(Benchmark):
params = (
[1e-9, 1e-10, 1e-11],
)
param_names = ["rtol"]
def setup(self, rtol):
self.a = np.array([0, 0, 0])
self.b = np.array([1, 2*np.pi, np.pi])
self.rtol = rtol
self.atol = 0
def f(self, r, theta, phi):
return r**2 * np.sin(phi)
def time_sphere(self, rtol):
nquad(
func=self.f,
ranges=[
(0, 1),
(0, 2*np.pi),
(0, np.pi),
],
opts={
"epsabs": self.rtol,
},
)
class NquadOscillatory(Benchmark):
params = (
# input dimension of integrand (ndim)
[1, 3, 5],
# rtol
[1e-10, 1e-11],
)
param_names = ["ndim", "rtol"]
def setup(self, ndim, rtol):
self.ndim = ndim
self.rtol = rtol
self.atol = 0
self.ranges = [(0, 1) for _ in range(self.ndim)]
if ndim == 5 and not is_xslow():
raise SkipNotImplemented("Takes too long to run in CI")
def f(self, *x):
x_arr = np.array(x)
r = 0.5
alphas = np.repeat(0.1, self.ndim)
return np.cos(2*np.pi*r + np.sum(alphas * x_arr, axis=-1))
def time_oscillatory(self, ndim, rtol):
nquad(
func=self.f,
ranges=self.ranges,
opts={
"epsabs": self.rtol,
},
)
class QuadVecOscillatory(Benchmark):
params = (
# output dimension of integrand (fdim)
[1, 5, 8],
# rtol
[1e-10, 1e-11],
)
param_names = ["fdim", "rtol"]
def setup(self, fdim, rtol):
self.fdim = fdim
self.rtol = rtol
self.atol = 0
self.a = 0
self.b = 1
self.pool = ThreadPoolExecutor(2)
def f(self, x):
r = np.repeat(0.5, self.fdim)
alphas = np.repeat(0.1, self.fdim)
return np.cos(2*np.pi*r + alphas * x)
def time_plain(self, fdim, rtol):
quad_vec(
f=self.f,
a=self.a,
b=self.b,
epsrel=self.rtol,
)
def time_threads(self, fdim, rtol):
quad_vec(
f=self.f,
a=self.a,
b=self.b,
epsrel=self.rtol,
workers=self.pool.map,
)
def track_subdivisions(self, fdim, rtol):
_, _, info = quad_vec(
f=self.f,
a=self.a,
b=self.b,
epsrel=self.rtol,
full_output=True,
)
return info.intervals.shape[0]
class CubatureSphere(Benchmark):
params = (
[
"gk15",
"gk21",
"genz-malik",
],
[1e-9, 1e-10, 1e-11],
)
param_names = ["rule", "rtol"]
def setup(self, rule, rtol):
self.a = np.array([0, 0, 0])
self.b = np.array([1, 2*np.pi, np.pi])
self.rule = rule
self.rtol = rtol
self.atol = 0
self.pool = ThreadPoolExecutor(2)
def f(self, x):
r = x[:, 0]
phi = x[:, 2]
return r**2 * np.sin(phi)
def time_plain(self, rule, rtol):
cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
)
def time_threads(self, rule, rtol):
cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
workers=self.pool.map,
)
def track_subdivisions(self, rule, rtol):
res = cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
)
return res.subdivisions
class CubatureOscillatory(Benchmark):
params = (
# rule
[
"genz-malik",
"gk15",
"gk21",
],
# input dimension of integrand (ndim)
[1, 3, 5],
# output dimension of integrand (fdim)
[1, 8],
# rtol
[1e-10, 1e-11],
)
param_names = ["rule", "ndim", "fdim", "rtol"]
def setup(self, rule, ndim, fdim, rtol):
self.ndim = ndim
self.fdim = fdim
self.rtol = rtol
self.atol = 0
self.a = np.zeros(self.ndim)
self.b = np.repeat(1, self.ndim)
self.rule = rule
self.pool = ThreadPoolExecutor(2)
if rule == "genz-malik" and ndim == 1:
raise SkipNotImplemented(f"{rule} not defined for 1D integrals")
if (rule == "gk-15" or rule == "gk-21") and ndim > 5:
raise SkipNotImplemented(f"{rule} uses too much memory for ndim > 5")
if rule == "gk-21" and ndim >= 5 and fdim == 8 and not is_xslow():
raise SkipNotImplemented("Takes too long to run in CI")
def f(self, x):
npoints, ndim = x.shape[0], x.shape[-1]
r = np.repeat(0.5, self.fdim)
alphas = np.repeat(0.1, self.fdim * ndim).reshape(self.fdim, ndim)
x_reshaped = x.reshape(npoints, *([1]*(len(alphas.shape) - 1)), ndim)
return np.cos(2*np.pi*r + np.sum(alphas * x_reshaped, axis=-1))
def time_plain(self, rule, ndim, fdim, rtol):
cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
)
def time_threads(self, rule, ndim, fdim, rtol):
cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
workers=self.pool.map,
)
def track_subdivisions(self, rule, ndim, fdim, rtol):
return cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
).subdivisions

View File

@ -1,15 +1,18 @@
from __future__ import division, absolute_import, print_function
import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark, safe_import
from .common import run_monitored, set_mem_rlimit, Benchmark
with safe_import():
try:
from scipy.stats import spearmanr
except ImportError:
pass
with safe_import():
try:
import scipy.interpolate as interpolate
with safe_import():
from scipy.sparse import csr_matrix
except ImportError:
pass
class Leaks(Benchmark):
@ -23,7 +26,7 @@ class Leaks(Benchmark):
peak_mems = []
for repeat in repeats:
code = f"""
code = """
import numpy as np
from scipy.interpolate import griddata
@ -34,10 +37,12 @@ class Leaks(Benchmark):
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range({repeat}):
for t in range(%(repeat)d):
for method in ['nearest', 'linear', 'cubic']:
griddata(points, values, (grid_x, grid_y), method=method)
"""
""" % dict(repeat=repeat)
_, peak_mem = run_monitored(code)
peak_mems.append(peak_mem)
@ -55,10 +60,10 @@ class Leaks(Benchmark):
class BenchPPoly(Benchmark):
def setup(self):
rng = np.random.default_rng(1234)
np.random.seed(1234)
m, k = 55, 3
x = np.sort(rng.random(m+1))
c = rng.random((k, m))
x = np.sort(np.random.random(m+1))
c = np.random.random((3, m))
self.pp = interpolate.PPoly(c, x)
npts = 100
@ -74,7 +79,7 @@ class GridData(Benchmark):
[10j, 100j, 1000j],
['nearest', 'linear', 'cubic']
]
def setup(self, n_grids, method):
self.func = lambda x, y: x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
@ -82,36 +87,8 @@ class GridData(Benchmark):
self.values = self.func(self.points[:, 0], self.points[:, 1])
def time_evaluation(self, n_grids, method):
interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y),
method=method)
interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y), method=method)
class GridDataPeakMem(Benchmark):
"""
Benchmark based on https://github.com/scipy/scipy/issues/20357
"""
def setup(self):
shape = (7395, 6408)
num_nonzero = 488686
rng = np.random.default_rng(1234)
random_rows = rng.integers(0, shape[0], num_nonzero)
random_cols = rng.integers(0, shape[1], num_nonzero)
random_values = rng.random(num_nonzero, dtype=np.float32)
sparse_matrix = csr_matrix((random_values, (random_rows, random_cols)),
shape=shape, dtype=np.float32)
sparse_matrix = sparse_matrix.toarray()
self.coords = np.column_stack(np.nonzero(sparse_matrix))
self.values = sparse_matrix[self.coords[:, 0], self.coords[:, 1]]
self.grid_x, self.grid_y = np.mgrid[0:sparse_matrix.shape[0],
0:sparse_matrix.shape[1]]
def peakmem_griddata(self):
interpolate.griddata(self.coords, self.values, (self.grid_x, self.grid_y),
method='cubic')
class Interpolate1d(Benchmark):
param_names = ['n_samples', 'method']
@ -149,13 +126,15 @@ class Interpolate2d(Benchmark):
self.xx, self.yy = np.meshgrid(self.x, self.y)
self.z = np.sin(self.xx**2+self.yy**2)
def time_interpolate(self, n_samples, method):
interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
param_names = ['n_samples', 'function']
params = [
[10, 50, 100],
['multiquadric', 'inverse', 'gaussian', 'linear',
'cubic', 'quintic', 'thin_plate']
['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
]
def setup(self, n_samples, function):
@ -173,32 +152,6 @@ class Rbf(Benchmark):
interpolate.Rbf(self.X, self.Y, self.z, function=function)
class RBFInterpolator(Benchmark):
param_names = ['neighbors', 'n_samples', 'kernel']
params = [
[None, 50],
[10, 100, 1000],
['linear', 'thin_plate_spline', 'cubic', 'quintic', 'multiquadric',
'inverse_multiquadric', 'inverse_quadratic', 'gaussian']
]
def setup(self, neighbors, n_samples, kernel):
rng = np.random.RandomState(0)
self.y = rng.uniform(-1, 1, (n_samples, 2))
self.x = rng.uniform(-1, 1, (n_samples, 2))
self.d = np.sum(self.y, axis=1)*np.exp(-6*np.sum(self.y**2, axis=1))
def time_rbf_interpolator(self, neighbors, n_samples, kernel):
interp = interpolate.RBFInterpolator(
self.y,
self.d,
neighbors=neighbors,
epsilon=5.0,
kernel=kernel
)
interp(self.x)
class UnivariateSpline(Benchmark):
param_names = ['n_samples', 'degree']
params = [
@ -247,8 +200,7 @@ class BivariateSpline(Benchmark):
interpolate.SmoothBivariateSpline(self.x, self.y, self.z)
def time_lsq_bivariate_spline(self, n_samples):
interpolate.LSQBivariateSpline(self.x, self.y, self.z,
self.xknots.flat, self.yknots.flat)
interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
@ -271,305 +223,3 @@ class Interpolate(Benchmark):
interpolate.interp1d(self.x, self.y, kind="linear")
else:
np.interp(self.z, self.x, self.y)
class RegularGridInterpolator(Benchmark):
"""
Benchmark RegularGridInterpolator with method="linear".
"""
param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']
params = [
[2, 3, 4],
[10, 40, 200],
[10, 100, 1000, 10000],
[1, -1]
]
def setup(self, ndim, max_coord_size, n_samples, flipped):
rng = np.random.default_rng(314159)
# coordinates halve in size over the dimensions
coord_sizes = [max_coord_size // 2**i for i in range(ndim)]
self.points = [np.sort(rng.random(size=s))[::flipped]
for s in coord_sizes]
self.values = rng.random(size=coord_sizes)
# choose in-bounds sample points xi
bounds = [(p.min(), p.max()) for p in self.points]
xi = [rng.uniform(low, high, size=n_samples)
for low, high in bounds]
self.xi = np.array(xi).T
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
)
def time_rgi_setup_interpolator(self, ndim, max_coord_size,
n_samples, flipped):
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
)
def time_rgi(self, ndim, max_coord_size, n_samples, flipped):
self.interp(self.xi)
class RGI_Cubic(Benchmark):
"""
Benchmark RegularGridInterpolator with method="cubic".
"""
param_names = ['ndim', 'n_samples', 'method']
params = [
[2],
[10, 40, 100, 200, 400],
['cubic', 'cubic_legacy']
]
def setup(self, ndim, n_samples, method):
rng = np.random.default_rng(314159)
self.points = [np.sort(rng.random(size=n_samples))
for _ in range(ndim)]
self.values = rng.random(size=[n_samples]*ndim)
# choose in-bounds sample points xi
bounds = [(p.min(), p.max()) for p in self.points]
xi = [rng.uniform(low, high, size=n_samples)
for low, high in bounds]
self.xi = np.array(xi).T
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
method=method
)
def time_rgi_setup_interpolator(self, ndim, n_samples, method):
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
method=method
)
def time_rgi(self, ndim, n_samples, method):
self.interp(self.xi)
class RGI_Quintic(Benchmark):
"""
Benchmark RegularGridInterpolator with method="quintic".
"""
param_names = ['ndim', 'n_samples', 'method']
params = [
[2],
[10, 40],
]
def setup(self, ndim, n_samples):
rng = np.random.default_rng(314159)
self.points = [np.sort(rng.random(size=n_samples))
for _ in range(ndim)]
self.values = rng.random(size=[n_samples]*ndim)
# choose in-bounds sample points xi
bounds = [(p.min(), p.max()) for p in self.points]
xi = [rng.uniform(low, high, size=n_samples)
for low, high in bounds]
self.xi = np.array(xi).T
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
method='quintic'
)
def time_rgi_setup_interpolator(self, ndim, n_samples):
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
method='quintic'
)
def time_rgi(self, ndim, n_samples):
self.interp(self.xi)
class RegularGridInterpolatorValues(interpolate.RegularGridInterpolator):
def __init__(self, points, xi, **kwargs):
# create fake values for initialization
values = np.zeros(tuple([len(pt) for pt in points]))
super().__init__(points, values, **kwargs)
self._is_initialized = False
# precompute values
(self.xi, self.xi_shape, self.ndim,
self.nans, self.out_of_bounds) = self._prepare_xi(xi)
self.indices, self.norm_distances = self._find_indices(xi.T)
self._is_initialized = True
def _prepare_xi(self, xi):
if not self._is_initialized:
return super()._prepare_xi(xi)
else:
# just give back precomputed values
return (self.xi, self.xi_shape, self.ndim,
self.nans, self.out_of_bounds)
def _find_indices(self, xi):
if not self._is_initialized:
return super()._find_indices(xi)
else:
# just give back pre-computed values
return self.indices, self.norm_distances
def __call__(self, values, method=None):
values = self._check_values(values)
# check fillvalue
self._check_fill_value(values, self.fill_value)
# check dimensionality
self._check_dimensionality(self.grid, values)
# flip, if needed
self._values = np.flip(values, axis=self._descending_dimensions)
return super().__call__(self.xi, method=method)
class RegularGridInterpolatorSubclass(Benchmark):
    """
    Benchmark RegularGridInterpolator with method="linear".
    """
    param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']
    params = [
        [2, 3, 4],
        [10, 40, 200],
        [10, 100, 1000, 10000],
        [1, -1],
    ]

    def setup(self, ndim, max_coord_size, n_samples, flipped):
        rng = np.random.default_rng(314159)
        # Coordinate axes halve in length with each additional dimension.
        sizes = [max_coord_size // 2**axis for axis in range(ndim)]
        axes = []
        for length in sizes:
            # Sorted random axis, optionally reversed to get a descending grid.
            axes.append(np.sort(rng.random(size=length))[::flipped])
        self.points = axes
        self.values = rng.random(size=sizes)
        # Draw in-bounds query points along every axis.
        per_axis_samples = []
        for axis in self.points:
            lo, hi = axis.min(), axis.max()
            per_axis_samples.append(rng.uniform(lo, hi, size=n_samples))
        self.xi = np.array(per_axis_samples).T
        self.interp = RegularGridInterpolatorValues(self.points, self.xi)

    def time_rgi_setup_interpolator(self, ndim, max_coord_size,
                                    n_samples, flipped):
        """Time construction of the fixed-xi interpolator."""
        self.interp = RegularGridInterpolatorValues(self.points, self.xi)

    def time_rgi(self, ndim, max_coord_size, n_samples, flipped):
        """Time evaluation with a fresh values array."""
        self.interp(self.values)
class CloughTocherInterpolatorValues(interpolate.CloughTocher2DInterpolator):
"""Subclass of the CT2DInterpolator with optional `values`.
This is mainly a demo of the functionality. See
https://github.com/scipy/scipy/pull/18376 for discussion
"""
def __init__(self, points, xi, tol=1e-6, maxiter=400, **kwargs):
interpolate.CloughTocher2DInterpolator.__init__(self, points, None,
tol=tol, maxiter=maxiter)
self.xi = None
self._preprocess_xi(*xi)
def _preprocess_xi(self, *args):
if self.xi is None:
self.xi, self.interpolation_points_shape = (
interpolate.CloughTocher2DInterpolator._preprocess_xi(self, *args)
)
return self.xi, self.interpolation_points_shape
def __call__(self, values):
self._set_values(values)
return super().__call__(self.xi)
class CloughTocherInterpolatorSubclass(Benchmark):
    """
    Benchmark CloughTocherInterpolatorValues.
    Derived from the docstring example,
    https://docs.scipy.org/doc/scipy-1.11.2/reference/generated/scipy.interpolate.CloughTocher2DInterpolator.html
    """
    param_names = ['n_samples']
    params = [10, 50, 100]

    def setup(self, n_samples):
        rng = np.random.default_rng(314159)
        # Scattered sample sites in [-0.5, 0.5)^2 with radial data.
        xs = rng.random(n_samples) - 0.5
        ys = rng.random(n_samples) - 0.5
        self.z = np.hypot(xs, ys)
        # Dense evaluation grid spanning the sample bounding box.
        grid_x = np.linspace(min(xs), max(xs))
        grid_y = np.linspace(min(ys), max(ys))
        self.X, self.Y = np.meshgrid(grid_x, grid_y)
        self.interp = CloughTocherInterpolatorValues(
            list(zip(xs, ys)), (self.X, self.Y))

    def time_clough_tocher(self, n_samples):
        """Time re-evaluation with fresh data on the cached grid."""
        self.interp(self.z)
class AAA(Benchmark):
    """Benchmark the AAA rational-approximation algorithm and its queries."""

    def setup(self):
        # 1000 points spiralling in the complex plane, plus a real
        # evaluation grid on [-1, 1].
        self.z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, num=1000))
        self.pts = np.linspace(-1, 1, num=1000)

    def time_AAA(self):
        """Fit tan(pi z / 2), then evaluate and extract poles/residues/roots."""
        approx = interpolate.AAA(self.z, np.tan(np.pi*self.z/2))
        approx(self.pts)
        approx.poles()
        approx.residues()
        approx.roots()
class NearestNDInterpolator(Benchmark):
    """
    Benchmark NearestNDInterpolator.
    Derived from the docstring example,
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NearestNDInterpolator.html
    """
    param_names = ['n_samples', 'grid']
    params = [
        [10, 100, 1000],
        [50, 100, 500],
    ]

    def setup(self, n_samples, grid):
        rng = np.random.default_rng(20191102)
        # Scattered data sites in [-0.5, 0.5)^2 with radial values.
        self.x = rng.random(n_samples) - 0.5
        self.y = rng.random(n_samples) - 0.5
        self.z = np.hypot(self.x, self.y)
        # grid x grid evaluation mesh over the sample bounding box.
        axis_x = np.linspace(self.x.min(), self.x.max(), num=grid)
        axis_y = np.linspace(self.y.min(), self.y.max(), num=grid)
        self.X, self.Y = np.meshgrid(axis_x, axis_y)

    def time_nearest_ND_interpolator(self, n_samples, grid):
        """Time construction plus evaluation on the mesh."""
        nearest = interpolate.NearestNDInterpolator(
            list(zip(self.x, self.y)), self.z)
        nearest(self.X, self.Y)

View File

@ -1,14 +1,19 @@
from __future__ import division, absolute_import, print_function
from .common import set_mem_rlimit, run_monitored, get_mem_info
import os
import tempfile
import collections
from io import BytesIO
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
try:
from scipy.io import savemat, loadmat
except ImportError:
pass
from .common import Benchmark
class MemUsage(Benchmark):
@ -18,17 +23,17 @@ class MemUsage(Benchmark):
@property
def params(self):
return [list(self._get_sizes().keys()), [True, False]]
return [self._get_sizes().keys(), [True, False]]
def _get_sizes(self):
sizes = {
'1M': 1e6,
'10M': 10e6,
'100M': 100e6,
'300M': 300e6,
# '500M': 500e6,
# '1000M': 1000e6,
}
sizes = collections.OrderedDict([
('1M', 1e6),
('10M', 10e6),
('100M', 100e6),
('300M', 300e6),
# ('500M', 500e6),
# ('1000M', 1000e6),
])
return sizes
def setup(self, size, compressed):
@ -62,27 +67,23 @@ class MemUsage(Benchmark):
savemat(self.filename, dict(x=x), do_compression=compressed, oned_as='row')
del x
code = f"""
code = """
from scipy.io import loadmat
loadmat('{self.filename}')
"""
loadmat('%s')
""" % (self.filename,)
time, peak_mem = run_monitored(code)
return peak_mem / size
def track_savemat(self, size, compressed):
size = int(self.sizes[size])
code = f"""
code = """
import numpy as np
from scipy.io import savemat
x = np.random.rand({size}//8).view(dtype=np.uint8)
savemat(
'{self.filename}',
dict(x=x),
do_compression={compressed},
oned_as='row'
)
"""
x = np.random.rand(%d//8).view(dtype=np.uint8)
savemat('%s', dict(x=x), do_compression=%r, oned_as='row')
""" % (size, self.filename, compressed)
time, peak_mem = run_monitored(code)
return peak_mem / size
@ -98,8 +99,8 @@ class StructArr(Benchmark):
def make_structarr(n_vars, n_fields, n_structs):
var_dict = {}
for vno in range(n_vars):
vname = f'var{vno:02d}'
end_dtype = [(f'f{d}', 'i4', 10) for d in range(n_fields)]
vname = 'var%00d' % vno
end_dtype = [('f%d' % d, 'i4', 10) for d in range(n_fields)]
s_arrs = np.zeros((n_structs,), dtype=end_dtype)
var_dict[vname] = s_arrs
return var_dict

View File

@ -1,206 +0,0 @@
from .common import set_mem_rlimit, run_monitored, get_mem_info
from io import BytesIO, StringIO
import os
import tempfile
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.sparse
import scipy.io._mmio
import scipy.io._fast_matrix_market
from scipy.io._fast_matrix_market import mmwrite
def generate_coo(size):
    """Build an nnz x nnz diagonal-pattern COO matrix of roughly ``size`` bytes.

    Each stored entry costs 4 (int32 row) + 4 (int32 col) + 8 (float64
    value) bytes, which fixes the entry count.
    """
    entries = int(size / (4 + 4 + 8))
    row_idx = np.arange(entries, dtype=np.int32)
    col_idx = np.arange(entries, dtype=np.int32)
    rng = np.random.default_rng()
    payload = rng.uniform(low=0, high=1.0, size=entries)
    return scipy.sparse.coo_matrix((payload, (row_idx, col_idx)),
                                   shape=(entries, entries))
def generate_csr(size):
    """Build a 1000-row CSR matrix whose payload is roughly ``size`` bytes.

    Budget: (nrows + 1) * 4 bytes of int32 indptr, then 4 (int32 index)
    + 8 (float64 value) bytes per stored entry.
    """
    nrows = 1000
    entries = int((size - (nrows + 1) * 4) / (4 + 8))
    # Spread row boundaries evenly over the entries; pin the end exactly.
    row_starts = (np.arange(nrows + 1, dtype=np.float32)
                  / nrows * entries).astype(np.int32)
    row_starts[-1] = entries
    col_idx = np.arange(entries, dtype=np.int32)
    payload = np.random.default_rng().uniform(low=0, high=1.0, size=entries)
    return scipy.sparse.csr_matrix((payload, col_idx, row_starts),
                                   shape=(nrows, entries))
def generate_dense(size):
    """Build a 1 x n dense float64 row of roughly ``size`` bytes (8 B/value)."""
    ncols = size // 8
    rng = np.random.default_rng()
    return rng.uniform(low=0, high=1.0, size=(1, ncols))
class MemUsage(Benchmark):
    """Track peak memory of Matrix Market read/write in a subprocess.

    Results are reported as the ratio of actual peak memory to the optimal
    (raw data) size, via ``run_monitored`` executing a small script.
    """
    param_names = ['size', 'implementation', 'matrix_type']
    timeout = 4*60
    unit = "actual/optimal memory usage ratio"

    @property
    def params(self):
        return [
            list(self._get_size().keys()),
            ['scipy.io'],
            ['dense', 'coo']  # + ['csr']
        ]

    def _get_size(self):
        # Label -> target payload size in bytes.
        size = {
            '1M': int(1e6),
            '10M': int(10e6),
            # NOTE(review): '25M' maps to int(10e6) — probably meant
            # int(25e6); confirm before relying on this label.
            '25M': int(10e6),
            # Note: the below sizes should work locally but cause issues on CircleCI
            # it fails to allocate memory even though there is easily enough
            # available (see gh-22574).
            #'100M': int(100e6),
            #'300M': int(300e6),
            # '500M': int(500e6),
            # '1000M': int(1000e6),
        }
        return size

    def setup(self, size, implementation, matrix_type):
        # Cap this process's address space so a runaway benchmark fails
        # instead of thrashing the machine.
        set_mem_rlimit()
        self.size = self._get_size()
        size = self.size[size]

        mem_info = get_mem_info()
        try:
            mem_available = mem_info['memavailable']
        except KeyError:
            mem_available = mem_info['memtotal']

        # Skip sizes that would not comfortably fit in available memory.
        max_size = int(mem_available * 0.7)//4
        if size > max_size:
            raise NotImplementedError()

        # Setup temp file
        f = tempfile.NamedTemporaryFile(delete=False, suffix='.mtx')
        f.close()
        self.filename = f.name

    def teardown(self, size, implementation, matrix_type):
        # Remove the temp .mtx file created in setup.
        os.unlink(self.filename)

    def track_mmread(self, size, implementation, matrix_type):
        # Write a matrix of the requested type/size, then measure the peak
        # memory of reading it back in a monitored subprocess.
        size = self.size[size]

        if matrix_type == 'coo':
            a = generate_coo(size)
        elif matrix_type == 'dense':
            a = generate_dense(size)
        elif matrix_type == 'csr':
            # cannot read directly into csr, only coo
            return 0
        else:
            raise NotImplementedError

        mmwrite(self.filename, a, symmetry='general')
        del a

        code = f"""
from {implementation} import mmread
mmread('{self.filename}')
"""
        time, peak_mem = run_monitored(code)
        return peak_mem / size

    def track_mmwrite(self, size, implementation, matrix_type):
        # Generate the matrix inside the monitored subprocess (the generator
        # helpers are inlined into the script) and measure mmwrite's peak.
        size = self.size[size]

        code = f"""
import numpy as np
import scipy.sparse
from {implementation} import mmwrite


def generate_coo(size):
    nnz = int(size / (4 + 4 + 8))
    rows = np.arange(nnz, dtype=np.int32)
    cols = np.arange(nnz, dtype=np.int32)
    data = np.random.default_rng().uniform(low=0, high=1.0, size=nnz)
    return scipy.sparse.coo_matrix((data, (rows, cols)), shape=(nnz, nnz))


def generate_csr(size):
    nrows = 1000
    nnz = int((size - (nrows + 1) * 4) / (4 + 8))
    indptr = (np.arange(nrows + 1, dtype=np.float32) / nrows * nnz).astype(np.int32)
    indptr[-1] = nnz
    indices = np.arange(nnz, dtype=np.int32)
    data = np.random.default_rng().uniform(low=0, high=1.0, size=nnz)
    return scipy.sparse.csr_matrix((data, indices, indptr), shape=(nrows, nnz))


def generate_dense(size):
    nnz = size // 8
    return np.random.default_rng().uniform(low=0, high=1.0, size=(1, nnz))

a = generate_{matrix_type}({size})
mmwrite('{self.filename}', a, symmetry='general')
"""  # noqa: E501
        time, peak_mem = run_monitored(code)
        return peak_mem / size
class IOSpeed(Benchmark):
    """
    Basic speed test. Does not show full potential as
    1) a relatively small matrix is used to keep test duration reasonable
    2) StringIO/BytesIO are noticeably slower than native C++ I/O to an SSD.
    """
    param_names = ['implementation', 'matrix_type']
    params = [
        ['scipy.io', 'scipy.io._mmio', 'scipy.io._fast_matrix_market'],
        ['dense', 'coo']  # + ['csr']
    ]

    def setup(self, implementation, matrix_type):
        # Use a 10MB matrix size to keep the runtimes somewhat short
        self.size = int(10e6)

        if matrix_type == 'coo':
            self.a = generate_coo(self.size)
        elif matrix_type == 'dense':
            self.a = generate_dense(self.size)
        elif matrix_type == 'csr':
            self.a = generate_csr(self.size)
        else:
            raise NotImplementedError

        # Pre-serialize once so the read benchmark measures parsing only.
        bio = BytesIO()
        mmwrite(bio, self.a, symmetry='general')
        self.a_str = bio.getvalue().decode()

    def _impl_module(self, implementation):
        # Map the implementation parameter to the module under test.
        # (This chain was previously duplicated verbatim in both time_*
        # methods.)
        if implementation == 'scipy.io':
            return scipy.io
        elif implementation == 'scipy.io._mmio':
            return scipy.io._mmio
        elif implementation == 'scipy.io._fast_matrix_market':
            return scipy.io._fast_matrix_market
        raise NotImplementedError

    def time_mmread(self, implementation, matrix_type):
        """Time parsing the pre-serialized matrix from an in-memory buffer."""
        if matrix_type == 'csr':
            # cannot read directly into csr, only coo
            return
        self._impl_module(implementation).mmread(StringIO(self.a_str))

    def time_mmwrite(self, implementation, matrix_type):
        """Time serializing the matrix to an in-memory buffer."""
        self._impl_module(implementation).mmwrite(
            BytesIO(), self.a, symmetry='general')

Some files were not shown because too many files have changed in this diff Show More