Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ Also, that release drops support for Python 3.9, making Python 3.10 the minimum
* Changed `dpnp.partition` implementation to reuse `dpnp.sort` where it brings the performance benefit [#2766](https://github.com/IntelPython/dpnp/pull/2766)
* `dpnp` uses pybind11 3.0.2 [#2773](https://github.com/IntelPython/dpnp/pull/2773)
* Modified CMake files for the extension to explicitly mark DPC++ compiler and dpctl headers as system ones and so to suppress the build warning generated inside them [#2770](https://github.com/IntelPython/dpnp/pull/2770)
* Updated QR tests to avoid element-wise comparisons for `raw` and `r` modes [#2785](https://github.com/IntelPython/dpnp/pull/2785)

### Deprecated

Expand Down
70 changes: 70 additions & 0 deletions dpnp/tests/qr_helper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
import numpy

from .helper import has_support_aspect64


def gram(x, xp):
    """Return the Gram matrix ``X^H @ X`` of *x* using array module *xp*."""
    x_conj_t = xp.swapaxes(xp.conjugate(x), -1, -2)
    return x_conj_t @ x


def get_R_from_raw(h, m, n, xp):
    """Extract the reduced R factor from NumPy-style ``raw`` QR output.

    ``h`` stores the Householder data in transposed (Fortran-order)
    layout, so R is recovered as ``triu((tril(h))^T)`` and truncated to
    shape ``(..., k, n)`` with ``k = min(m, n)``.
    """
    k = min(m, n)
    lower = xp.tril(h)
    transposed = xp.swapaxes(lower, -1, -2)
    upper = xp.triu(transposed[..., :m, :n])
    return upper[..., :k, :]


def check_qr(a_np, a_xp, mode, xp):
    """Verify a QR result with mode-dependent functional checks.

    QR is not unique: element-wise comparison with NumPy may differ by
    sign/phase.  Instead of comparing elements we check:

      * ``complete``/``reduced``: the decomposition ``Q @ R == A``;
      * ``raw``/``r``: R against the reduced-mode R (same library, same
        algorithm) and the invariant ``R^H @ R == A^H @ A``, which holds
        because Q has orthonormal columns.

    Parameters:
        a_np : NumPy input array.
        a_xp : the same data as an *xp* array.
        mode : one of "complete", "reduced", "raw", "r".
        xp   : array module under test (e.g. dpnp).
    """
    if mode in ("complete", "reduced"):
        res = xp.linalg.qr(a_xp, mode)
        assert xp.allclose(res.Q @ res.R, a_xp, atol=1e-5)

    elif mode == "raw":
        _, tau_np = numpy.linalg.qr(a_np, mode=mode)
        h_xp, tau_xp = xp.linalg.qr(a_xp, mode=mode)

        m, n = a_np.shape[-2], a_np.shape[-1]
        r_xp = get_R_from_raw(h_xp, m, n, xp)
        _check_r(r_xp, a_xp, xp)

        # raw mode also returns tau; validate its shape and dtype
        assert tau_xp.shape == tau_np.shape
        if not has_support_aspect64(tau_xp.sycl_device):
            # no fp64 support: only the dtype kind can be expected to match
            assert tau_xp.dtype.kind == tau_np.dtype.kind
        else:
            assert tau_xp.dtype == tau_np.dtype

    else:  # mode == "r"
        r_xp = xp.linalg.qr(a_xp, mode="r")
        _check_r(r_xp, a_xp, xp)


def _check_r(r_xp, a_xp, xp):
    # Shared verification for the "raw" and "r" modes.
    # Use reduced-mode R as a reference: reduced is validated via Q @ R == A.
    exp_r = xp.linalg.qr(a_xp, mode="reduced").R
    assert xp.allclose(r_xp, exp_r, atol=1e-4, rtol=1e-4)

    # Compare R^H @ R == A^H @ A
    assert xp.allclose(gram(r_xp, xp), gram(a_xp, xp), atol=1e-4, rtol=1e-4)
105 changes: 15 additions & 90 deletions dpnp/tests/test_linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
has_support_aspect64,
numpy_version,
)
from .qr_helper import check_qr
from .third_party.cupy import testing


Expand Down Expand Up @@ -3584,7 +3585,7 @@ def test_error(self):


class TestQr:
@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True))
@pytest.mark.parametrize("dtype", get_float_complex_dtypes())
@pytest.mark.parametrize(
"shape",
[
Expand All @@ -3610,60 +3611,27 @@ class TestQr:
"(2, 2, 4)",
],
)
@pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"])
@pytest.mark.parametrize("mode", ["complete", "reduced", "r", "raw"])
def test_qr(self, dtype, shape, mode):
a = generate_random_numpy_array(shape, dtype, seed_value=81)
ia = dpnp.array(a)

if mode == "r":
np_r = numpy.linalg.qr(a, mode)
dpnp_r = dpnp.linalg.qr(ia, mode)
else:
np_q, np_r = numpy.linalg.qr(a, mode)

# check decomposition
if mode in ("complete", "reduced"):
result = dpnp.linalg.qr(ia, mode)
dpnp_q, dpnp_r = result.Q, result.R
assert dpnp.allclose(
dpnp.matmul(dpnp_q, dpnp_r), ia, atol=1e-05
)
else: # mode=="raw"
dpnp_q, dpnp_r = dpnp.linalg.qr(ia, mode)
assert_dtype_allclose(dpnp_q, np_q, factor=24)
a = generate_random_numpy_array(shape, dtype, seed_value=None)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Was that intended to pass seed_value=None? And why so?

ia = dpnp.array(a, dtype=dtype)

if mode in ("raw", "r"):
assert_dtype_allclose(dpnp_r, np_r, factor=24)
check_qr(a, ia, mode, dpnp)

@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True))
@pytest.mark.parametrize("dtype", get_float_complex_dtypes())
@pytest.mark.parametrize(
"shape",
[(32, 32), (8, 16, 16)],
ids=["(32, 32)", "(8, 16, 16)"],
)
@pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"])
@pytest.mark.parametrize("mode", ["complete", "reduced", "r", "raw"])
def test_qr_large(self, dtype, shape, mode):
a = generate_random_numpy_array(shape, dtype, seed_value=81)
ia = dpnp.array(a)

if mode == "r":
np_r = numpy.linalg.qr(a, mode)
dpnp_r = dpnp.linalg.qr(ia, mode)
else:
np_q, np_r = numpy.linalg.qr(a, mode)

# check decomposition
if mode in ("complete", "reduced"):
result = dpnp.linalg.qr(ia, mode)
dpnp_q, dpnp_r = result.Q, result.R
assert dpnp.allclose(dpnp.matmul(dpnp_q, dpnp_r), ia, atol=1e-5)
else: # mode=="raw"
dpnp_q, dpnp_r = dpnp.linalg.qr(ia, mode)
assert_allclose(dpnp_q, np_q, atol=1e-4)
if mode in ("raw", "r"):
assert_allclose(dpnp_r, np_r, atol=1e-4)
check_qr(a, ia, mode, dpnp)

@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True))
@pytest.mark.parametrize("dtype", get_float_complex_dtypes())
@pytest.mark.parametrize(
"shape",
[(0, 0), (0, 2), (2, 0), (2, 0, 3), (2, 3, 0), (0, 2, 3)],
Expand All @@ -3676,65 +3644,22 @@ def test_qr_large(self, dtype, shape, mode):
"(0, 2, 3)",
],
)
@pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"])
@pytest.mark.parametrize("mode", ["complete", "reduced", "r", "raw"])
def test_qr_empty(self, dtype, shape, mode):
a = numpy.empty(shape, dtype=dtype)
ia = dpnp.array(a)

if mode == "r":
np_r = numpy.linalg.qr(a, mode)
dpnp_r = dpnp.linalg.qr(ia, mode)
else:
np_q, np_r = numpy.linalg.qr(a, mode)

if mode in ("complete", "reduced"):
result = dpnp.linalg.qr(ia, mode)
dpnp_q, dpnp_r = result.Q, result.R
else:
dpnp_q, dpnp_r = dpnp.linalg.qr(ia, mode)

assert_dtype_allclose(dpnp_q, np_q)
check_qr(a, ia, mode, dpnp)

assert_dtype_allclose(dpnp_r, np_r)

@pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"])
@pytest.mark.parametrize("mode", ["complete", "reduced", "r", "raw"])
def test_qr_strides(self, mode):
a = generate_random_numpy_array((5, 5))
ia = dpnp.array(a)

# positive strides
if mode == "r":
np_r = numpy.linalg.qr(a[::2, ::2], mode)
dpnp_r = dpnp.linalg.qr(ia[::2, ::2], mode)
else:
np_q, np_r = numpy.linalg.qr(a[::2, ::2], mode)

if mode in ("complete", "reduced"):
result = dpnp.linalg.qr(ia[::2, ::2], mode)
dpnp_q, dpnp_r = result.Q, result.R
else:
dpnp_q, dpnp_r = dpnp.linalg.qr(ia[::2, ::2], mode)

assert_dtype_allclose(dpnp_q, np_q)

assert_dtype_allclose(dpnp_r, np_r)

check_qr(a[::2, ::2], ia[::2, ::2], mode, dpnp)
# negative strides
if mode == "r":
np_r = numpy.linalg.qr(a[::-2, ::-2], mode)
dpnp_r = dpnp.linalg.qr(ia[::-2, ::-2], mode)
else:
np_q, np_r = numpy.linalg.qr(a[::-2, ::-2], mode)

if mode in ("complete", "reduced"):
result = dpnp.linalg.qr(ia[::-2, ::-2], mode)
dpnp_q, dpnp_r = result.Q, result.R
else:
dpnp_q, dpnp_r = dpnp.linalg.qr(ia[::-2, ::-2], mode)

assert_dtype_allclose(dpnp_q, np_q)

assert_dtype_allclose(dpnp_r, np_r)
check_qr(a[::-2, ::-2], ia[::-2, ::-2], mode, dpnp)

def test_qr_errors(self):
a_dp = dpnp.array([[1, 2], [3, 5]], dtype="float32")
Expand Down
41 changes: 20 additions & 21 deletions dpnp/tests/third_party/cupy/linalg_tests/test_decomposition.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,9 @@
# from cupy.cuda import runtime
# from cupy.linalg import _util
from dpnp.tests.helper import (
LTS_VERSION,
has_support_aspect64,
is_lts_driver,
)
from dpnp.tests.qr_helper import check_qr
from dpnp.tests.third_party.cupy import testing
from dpnp.tests.third_party.cupy.testing import _condition

Expand Down Expand Up @@ -169,7 +168,6 @@ def test_decomposition(self, dtype):
)
)
class TestQRDecomposition(unittest.TestCase):

@testing.for_dtypes("fdFD")
def check_mode(self, array, mode, dtype):
# if runtime.is_hip and driver.get_build_version() < 307:
Expand All @@ -178,22 +176,29 @@ def check_mode(self, array, mode, dtype):

a_cpu = numpy.asarray(array, dtype=dtype)
a_gpu = cupy.asarray(array, dtype=dtype)
result_gpu = cupy.linalg.qr(a_gpu, mode=mode)
# QR is not unique:
# element-wise comparison with NumPy may differ by sign/phase.
# To verify correctness use mode-dependent functional checks:
# complete/reduced: check decomposition Q @ R = A
# raw/r: check invariant R^H @ R = A^H @ A

# result_gpu = cupy.linalg.qr(a_gpu, mode=mode)
if (
mode != "raw"
or numpy.lib.NumpyVersion(numpy.__version__) >= "1.22.0rc1"
):
result_cpu = numpy.linalg.qr(a_cpu, mode=mode)
self._check_result(result_cpu, result_gpu)

def _check_result(self, result_cpu, result_gpu):
if isinstance(result_cpu, tuple):
for b_cpu, b_gpu in zip(result_cpu, result_gpu):
assert b_cpu.dtype == b_gpu.dtype
testing.assert_allclose(b_cpu, b_gpu, atol=1e-4)
else:
assert result_cpu.dtype == result_gpu.dtype
testing.assert_allclose(result_cpu, result_gpu, atol=1e-4)
# result_cpu = numpy.linalg.qr(a_cpu, mode=mode)
# self._check_result(result_cpu, result_gpu, a_gpu, mode)
check_qr(a_cpu, a_gpu, mode, cupy)

# def _check_result(self, result_cpu, result_gpu):
# if isinstance(result_cpu, tuple):
# for b_cpu, b_gpu in zip(result_cpu, result_gpu):
# assert b_cpu.dtype == b_gpu.dtype
# testing.assert_allclose(b_cpu, b_gpu, atol=1e-4)
# else:
# assert result_cpu.dtype == result_gpu.dtype
# testing.assert_allclose(result_cpu, result_gpu, atol=1e-4)

@testing.fix_random()
@_condition.repeat(3, 10)
Expand All @@ -202,19 +207,13 @@ def test_mode(self):
self.check_mode(numpy.random.randn(3, 3), mode=self.mode)
self.check_mode(numpy.random.randn(5, 4), mode=self.mode)

@pytest.mark.skipif(
is_lts_driver(version=LTS_VERSION.V1_6), reason="SAT-8375"
)
@testing.with_requires("numpy>=1.22")
@testing.fix_random()
def test_mode_rank3(self):
self.check_mode(numpy.random.randn(3, 2, 4), mode=self.mode)
self.check_mode(numpy.random.randn(4, 3, 3), mode=self.mode)
self.check_mode(numpy.random.randn(2, 5, 4), mode=self.mode)

@pytest.mark.skipif(
is_lts_driver(version=LTS_VERSION.V1_6), reason="SAT-8375"
)
@testing.with_requires("numpy>=1.22")
@testing.fix_random()
def test_mode_rank4(self):
Expand Down
Loading