Compare commits

13 Commits

| Author | SHA1 | Date |
|---|---|---|
| | cd0cf2c4af | |
| | 55624039e7 | |
| | 8cdabd704f | |
| | 87bc14529c | |
| | 807db5fbff | |
| | 56cbc37ffd | |
| | 6b25a08728 | |
| | 7b69aada05 | |
| | 6cbb6f82a5 | |
| | 7ce64df93c | |
| | 7d5edb6b83 | |
| | af6543069f | |
| | 8fb43855e1 | |
@@ -0,0 +1,82 @@

```yaml
name: GPU jobs

on:
  push:
    branches:
      - maintenance/**
      - gpu-ci
  pull_request:
    branches:
      - main
      - maintenance/**

permissions:
  contents: read  # to fetch code (actions/checkout)

env:
  CCACHE_DIR: "${{ github.workspace }}/.ccache"
  PIXI_CACHE_DIR: "${{ github.workspace }}/.cache/rattler"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  pytorch_gpu:
    name: PyTorch, JAX, CuPy GPU
    runs-on: ghcr.io/cirruslabs/ubuntu-runner-amd64-gpu:22.04
    steps:
      - name: Checkout scipy repo
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        with:
          submodules: recursive

      - name: Cache pixi
        uses: cirruslabs/cache@v4 #caa3ad0624c6c2acd8ba50ad452d1f44bba078bb # v4
        with:
          path: ${{ env.PIXI_CACHE_DIR }}
          # Cache hit if lock file did not change. If it did, still restore the cache,
          # since most packages will still be the same - the cache save will
          # then happen at the end (in case the lock file didn't change,
          # nothing is saved at the end of a job).
          key: ${{ runner.os }}-gpu-pixi-${{ hashFiles('.github/workflows/pixi.lock') }}
          restore-keys: |
            ${{ runner.os }}-gpu-pixi

      - name: Setup compiler cache
        uses: cirruslabs/cache@v4 #caa3ad0624c6c2acd8ba50ad452d1f44bba078bb # v4
        with:
          path: ${{ env.CCACHE_DIR }}
          # Make primary key unique by using `run_id`, this ensures the cache
          # is always saved at the end.
          key: ${{ runner.os }}-gpu-ccache-${{ github.run_id }}
          restore-keys: |
            ${{ runner.os }}-gpu-ccache

      - name: run nvidia-smi
        run: nvidia-smi

      - name: run nvidia-smi --query
        run: nvidia-smi --query

      - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1
        with:
          pixi-version: v0.39.2
          manifest-path: .github/workflows/pixi.toml
          cache: false

      - name: Build SciPy
        working-directory: .github/workflows
        run: pixi run build

      #- name: Run PyTorch GPU tests
      #  working-directory: .github/workflows
      #  run: pixi run -e torch-cuda test-torch-cuda -v -m "array_api_backends and not slow"

      #- name: Run JAX GPU tests
      #  working-directory: .github/workflows
      #  run: pixi run -e jax-cuda test-jax-cuda -v -m "array_api_backends and not slow"

      #- name: Run CuPy tests
      #  working-directory: .github/workflows
      #  run: pixi run -e cupy test-cupy -v -m "array_api_backends and not slow"
```
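For local reproduction, the build step and the (currently commented-out) GPU test steps map directly onto pixi tasks. A minimal sketch, assuming pixi v0.39.2 is installed and the repository is checked out with submodules; the commands are taken verbatim from the workflow steps above:

```sh
# Run from the manifest directory, matching the job's working-directory.
cd .github/workflows
pixi run build    # "default" environment: build + test + openblas

# GPU test invocations as they appear (commented out) in the workflow:
pixi run -e torch-cuda test-torch-cuda -v -m "array_api_backends and not slow"
pixi run -e jax-cuda test-jax-cuda -v -m "array_api_backends and not slow"
pixi run -e cupy test-cupy -v -m "array_api_backends and not slow"
```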
File diff suppressed because it is too large
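(The suppressed file is presumably `.github/workflows/pixi.lock`, the lock file hashed by the pixi cache key above; generated lock files of this size are routinely collapsed in diff views.)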
@@ -0,0 +1,150 @@

```toml
[project]
name = "scipy"
version = "1.16.0.dev0"
description = "Fundamental algorithms for scientific computing in Python"
authors = ["SciPy Developers <scipy-dev@python.org>"]
channels = ["conda-forge"]
platforms = ["linux-64"]

[dependencies]
compilers = ">=1.7.0,<2"
pkg-config = ">=0.29.2,<0.30"
ninja = ">=1.12.1,<2"
python = ">=3.12.0,<3.13"
meson = ">=1.5.1,<2"
meson-python = ">=0.16.0"
cython = ">=3.0.10,<4"
python-build = "*"
pip = "*"
blas-devel = "*"
numpy = ">=2.0.0"
pybind11 = ">=2.13.1"
pythran = ">=0.15.0"
rich-click = "*"
pydevtool = "*"
# Add test dependencies to default list of dependencies so they show up in every
# environment without having to also see the test task in those other envs
pytest = "*"
hypothesis = "*"
pytest-xdist = "*"
threadpoolctl = "*"
pooch = "*"
mpmath = "*"
gmpy2 = "*"
ccache = ">=4.10.1,<5"

[feature.build.tasks]
build = { cmd = "python dev.py build -C-Dblas=blas -C-Dlapack=lapack -C-Duse-g77-abi=true", cwd = "../..", env = { CC = "ccache $CC", CXX = "ccache $CXX", FC = "ccache $FC" } }
wheel = { cmd = "python -m build -wnx -Cbuild-dir=build-whl && cp dist/*.whl ../../wheelhouse/", cwd = "../.." }

[feature.test.tasks]
test = { cmd = "python dev.py test", cwd = "../.." }

# BLAS/LAPACK features
[feature.openblas.dependencies]
libblas = { version = "*", build = "*openblas" }
openblas = ">=0.3.27,<0.4"

[feature.mkl.target.linux-64.dependencies]
libblas = { version = "*", build = "*mkl" }
mkl = ">=2023.2.0,<2025"


# CPU/CUDA features
[feature.cpu.tasks]
test-cpu = { cmd = "python dev.py test -b all", cwd = "../.." }

[feature.cuda]
platforms = ["linux-64"]
system-requirements = { cuda = "12" }

[feature.cuda.dependencies]
cuda-version = ">=12.0,<13"

[feature.cuda.tasks]
test-cuda = { cmd = "python dev.py test -b all", cwd = "../..", env = { SCIPY_DEVICE = "cuda" } }


# Array libraries we have support for
[feature.torch-base]
platforms = ["linux-64"]

[feature.torch-base.tasks]
test-torch = { cmd = "python dev.py test -b torch", cwd = "../.." }

[feature.torch-cpu.dependencies]
pytorch-cpu = "*"

[feature.torch-cuda.dependencies]
pytorch-gpu = "*"

[feature.torch-cuda.tasks]
test-torch-cuda = { cmd = "python dev.py test -b torch", cwd = "../..", env = { SCIPY_DEVICE = "cuda" } }


[feature.cupy]
platforms = ["linux-64"]

[feature.cupy.dependencies]
cupy = "*"

[feature.cupy.tasks]
test-cupy = { cmd = "python dev.py test -b cupy", cwd = "../.." }


[feature.jax-cpu]
# Windows support pending: https://github.com/conda-forge/jaxlib-feedstock/issues/161
platforms = ["linux-64"]

[feature.jax-cpu.dependencies]
jax = "*"
jaxlib = { version = "*", build = "*cpu*" }

[feature.jax-cuda]
platforms = ["linux-64"]

[feature.jax-cuda.dependencies]
jax = "*"
jaxlib = { version = "*", build = "*cuda*" }

[feature.jax-base.tasks]
test-jax = { cmd = "python dev.py test -b jax.numpy", cwd = "../.." }

[feature.jax-cuda.tasks]
test-jax-cuda = { cmd = "python dev.py test -b jax.numpy", cwd = "../..", env = { SCIPY_DEVICE = "cuda" } }

[feature.array_api_strict.dependencies]
array-api-strict = "*"

[feature.array_api_strict.tasks]
test-strict = { cmd = "python dev.py test -b array_api_strict", cwd = "../.." }


[environments]
default = ["build", "test", "openblas"]
torch = ["torch-base", "torch-cpu", "mkl"]
torch-cuda = ["torch-base", "torch-cuda", "mkl", "cuda"]
cupy = ["cupy"]
jax = ["jax-base", "jax-cpu"]
jax-cuda = ["jax-base", "jax-cuda", "cuda"]
array-api-strict = ["array_api_strict"]
array-api = [
    "cpu",
    "array_api_strict",
    "jax-base",
    "jax-cpu",
    "mkl",
    "torch-base",
    "torch-cpu",
]
array-api-cuda = [
    "cuda",
    "array_api_strict",
    "cupy",
    "jax-base",
    "jax-cuda",
    "mkl",
    "torch-base",
    "torch-cuda",
    "test",
]
```
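The `[environments]` table composes the features above into named environments, and each environment exposes the tasks of its member features. A sketch of the resulting invocations, assuming the same working directory as the workflow (`-e` selects an `[environments]` entry; the task names come from the `[feature.*.tasks]` tables):

```sh
cd .github/workflows
pixi run -e torch test-torch                # PyTorch CPU build, MKL BLAS
pixi run -e torch-cuda test-torch-cuda      # PyTorch + CUDA 12; task sets SCIPY_DEVICE=cuda
pixi run -e jax test-jax                    # jax.numpy backend
pixi run -e array-api-strict test-strict    # strict array-API reference library
```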
@@ -151,6 +151,7 @@ benchmarks/results

```diff
 benchmarks/scipy
 benchmarks/html
 benchmarks/scipy-benchmarks
+.github/workflows/.pixi
 .openblas
 scipy/_distributor_init_local.py
 scipy/__config__.py
```
@@ -603,6 +603,7 @@ class TestMapCoordinates:

```diff
         assert out.dtype is np.dtype('f')
         assert_array_almost_equal(out, xp.asarray([[1]]))
 
+    @pytest.mark.skip_xp_backends(cpu_only=True)
     @pytest.mark.skipif('win32' in sys.platform or np.intp(0).itemsize < 8,
                         reason='do not run on 32 bit or windows '
                                '(no sparse memory)')
```
@@ -1105,7 +1105,7 @@ class TestOAConvolve:

```diff
         # Regression test for #1745: crashes with 0-length input.
         xp_assert_equal(
             oaconvolve(xp.asarray(a), xp.asarray(b)),
-            xp.asarray([]),
+            xp.asarray([]), check_dtype=False
         )
 
     def test_zero_rank(self, xp):
```
@@ -1621,6 +1621,7 @@ class TestCSpline1DEval:

```diff
         assert ynew.dtype == y.dtype
 
 
+@skip_xp_backends(cpu_only=True, exceptions=['cupy'])
 class TestOrderFilt:
 
     def test_basic(self, xp):
```
@@ -3874,6 +3874,7 @@ class TestStudentTest:

```diff
     P1_1_l = P1_1 / 2
     P1_1_g = 1 - (P1_1 / 2)
 
+    @skip_xp_backends(cpu_only=True)
     def test_onesample(self, xp):
         with suppress_warnings() as sup, \
                 np.errstate(invalid="ignore", divide="ignore"):
```
@@ -3926,6 +3927,7 @@ class TestStudentTest:

```diff
         assert_raises(ValueError, stats.ttest_1samp, x, 5.0,
                       nan_policy='foobar')
 
+    @skip_xp_backends(cpu_only=True)
     def test_1samp_alternative(self, xp):
         message = "`alternative` must be 'less', 'greater', or 'two-sided'."
         with pytest.raises(ValueError, match=message):
```
@@ -3939,6 +3941,7 @@ class TestStudentTest:

```diff
         xp_assert_close(p, xp.asarray(self.P1_1_g))
         xp_assert_close(t, xp.asarray(self.T1_1))
 
+    @skip_xp_backends(cpu_only=True)
     @pytest.mark.skip_xp_backends('jax.numpy', reason='Generic impl mutates array.')
     @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
     def test_1samp_ci_1d(self, xp, alternative):
```
@@ -3965,6 +3968,7 @@ class TestStudentTest:

```diff
         xp_assert_close(ci.high, xp.asarray(ref[alternative][1]))
         xp_assert_equal(res.df, xp.asarray(n-1))
 
+    @skip_xp_backends(cpu_only=True)
     def test_1samp_ci_iv(self, xp):
         # test `confidence_interval` method input validation
         res = stats.ttest_1samp(xp.arange(10.), 0.)
```
@@ -5135,6 +5139,7 @@ def _desc_stats(x1, x2, axis=0, *, xp=None):

```diff
     return _stats(x1, axis) + _stats(x2, axis)
 
 
+@skip_xp_backends(cpu_only=True)
 def test_ttest_ind(xp):
     # regression test
     tr = xp.asarray(1.0912746897927283)
```
@@ -5846,6 +5851,7 @@ class Test_ttest_trim:

```diff
         stats.ttest_ind([1, 2], [2, 1], trim=trim)
 
 
+@pytest.mark.skip_xp_backends(cpu_only=True)
 class Test_ttest_CI:
     # indices in order [alternative={two-sided, less, greater},
     #                   equal_var={False, True}, trim={0, 0.2}]
```
@@ -5940,6 +5946,7 @@ def test__broadcast_concatenate():

```diff
     assert b[i, j, k, l - a.shape[-3], m, n] == c[i, j, k, l, m, n]
 
 
+@pytest.mark.skip_xp_backends(cpu_only=True)
 def test_ttest_ind_with_uneq_var(xp):
     # check vs. R `t.test`, e.g.
     # options(digits=20)
```
@@ -6021,6 +6028,7 @@ def test_ttest_ind_with_uneq_var(xp):

```diff
     xp_assert_close(res.pvalue, pr_2D)
 
 
+@pytest.mark.skip_xp_backends(cpu_only=True)
 def test_ttest_ind_zero_division(xp):
     # test zero division problem
     x = xp.zeros(3)
```
@@ -6135,6 +6143,7 @@ def test_gh5686(xp):

```diff
     stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
 
 
+@skip_xp_backends(cpu_only=True)
 def test_ttest_ind_from_stats_inputs_zero(xp):
     # Regression test for gh-6409.
     zero = xp.asarray(0.)
```
@@ -6268,6 +6277,7 @@ def test_ttest_1samp_new_omit(xp):

```diff
     xp_assert_close(t, tr)
 
 
+@pytest.mark.skip_xp_backends(cpu_only=True)
 @pytest.mark.skip_xp_backends('jax.numpy', reason='Generic impl mutates array.')
 def test_ttest_1samp_popmean_array(xp):
     # when popmean.shape[axis] != 1, raise an error
```
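The test-suite hunks above all make the same kind of change: a `skip_xp_backends(cpu_only=True)` (or `pytest.mark.skip_xp_backends(cpu_only=True)`) marker is added so that the new GPU CI skips tests whose underlying SciPy code paths are CPU-only. A hedged sketch of running one affected file against a single backend through the `dev.py` interface that the pixi tasks wrap (the test-file path is an assumption; the diff's file headers were lost in extraction):

```sh
# -b selects the array-API backend, as in the pixi task definitions above;
# SCIPY_DEVICE=cuda mirrors the test-torch-cuda task's environment.
# The path below is assumed (the ttest hunks resemble scipy/stats/tests/test_stats.py).
python dev.py test -b torch -t scipy/stats/tests/test_stats.py
SCIPY_DEVICE=cuda python dev.py test -b torch
```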