diff --git a/aesara/__init__.py b/aesara/__init__.py
index b0c9c2c805..2e50c79392 100644
--- a/aesara/__init__.py
+++ b/aesara/__init__.py
@@ -167,7 +167,7 @@ def get_scalar_constant_value(v):
     """
    # Is it necessary to test for presence of aesara.sparse at runtime?
    sparse = globals().get("sparse")
-    if sparse and isinstance(v.type, sparse.SparseType):
+    if sparse and isinstance(v.type, sparse.SparseTensorType):
        if v.owner is not None and isinstance(v.owner.op, sparse.CSM):
            data = v.owner.inputs[0]
            return tensor.get_scalar_constant_value(data)
diff --git a/aesara/link/c/type.py b/aesara/link/c/type.py
index 1920f36e61..4696b3333b 100644
--- a/aesara/link/c/type.py
+++ b/aesara/link/c/type.py
@@ -17,7 +17,7 @@ class CType(Type, CLinkerType):
 
     - `TensorType`: for numpy.ndarray
-    - `SparseType`: for scipy.sparse
+    - `SparseTensorType`: for scipy.sparse
 
     But you are encouraged to write your own, as described in WRITEME.
 
diff --git a/aesara/misc/may_share_memory.py b/aesara/misc/may_share_memory.py
index b522834e33..89545d8e44 100644
--- a/aesara/misc/may_share_memory.py
+++ b/aesara/misc/may_share_memory.py
@@ -12,7 +12,7 @@
 try:
     import scipy.sparse
 
-    from aesara.sparse.basic import SparseType
+    from aesara.sparse.basic import SparseTensorType
 
     def _is_sparse(a):
         return scipy.sparse.issparse(a)
@@ -64,4 +64,4 @@ def may_share_memory(a, b, raise_other_type=True):
     if a_gpua or b_gpua:
         return False
 
-    return SparseType.may_share_memory(a, b)
+    return SparseTensorType.may_share_memory(a, b)
diff --git a/aesara/sparse/__init__.py b/aesara/sparse/__init__.py
index 7b6873105f..376da5dcce 100644
--- a/aesara/sparse/__init__.py
+++ b/aesara/sparse/__init__.py
@@ -9,7 +9,7 @@
     enable_sparse = False
     warn("SciPy can't be imported. Sparse matrix support is disabled.")
 
-from aesara.sparse.type import SparseType, _is_sparse
+from aesara.sparse.type import SparseTensorType, _is_sparse
 
 if enable_sparse:
diff --git a/aesara/sparse/basic.py b/aesara/sparse/basic.py
index b27ae5bef8..ba80e38db7 100644
--- a/aesara/sparse/basic.py
+++ b/aesara/sparse/basic.py
@@ -22,7 +22,7 @@
 from aesara.link.c.op import COp
 from aesara.link.c.type import generic
 from aesara.misc.safe_asarray import _asarray
-from aesara.sparse.type import SparseType, _is_sparse
+from aesara.sparse.type import SparseTensorType, _is_sparse
 from aesara.sparse.utils import hash_from_sparse
 from aesara.tensor import basic as at
 from aesara.tensor.basic import Split
@@ -80,11 +80,11 @@ def _is_sparse_variable(x):
     if not isinstance(x, Variable):
         raise NotImplementedError(
             "this function should only be called on "
-            "*variables* (of type sparse.SparseType "
+            "*variables* (of type sparse.SparseTensorType "
             "or TensorType, for instance), not ",
             x,
         )
-    return isinstance(x.type, SparseType)
+    return isinstance(x.type, SparseTensorType)
 
 
 def _is_dense_variable(x):
@@ -100,7 +100,7 @@ def _is_dense_variable(x):
     if not isinstance(x, Variable):
         raise NotImplementedError(
             "this function should only be called on "
-            "*variables* (of type sparse.SparseType or "
+            "*variables* (of type sparse.SparseTensorType or "
             "TensorType, for instance), not ",
             x,
         )
@@ -159,13 +159,15 @@ def as_sparse_variable(x, name=None, ndim=None, **kwargs):
         else:
             x = x.outputs[0]
     if isinstance(x, Variable):
-        if not isinstance(x.type, SparseType):
-            raise TypeError("Variable type field must be a SparseType.", x, x.type)
+        if not isinstance(x.type, SparseTensorType):
+            raise TypeError(
+                "Variable type field must be a SparseTensorType.", x, x.type
+            )
         return x
     try:
         return constant(x, name=name)
     except TypeError:
-        raise TypeError(f"Cannot convert {x} to SparseType", type(x))
+        raise TypeError(f"Cannot convert {x} to SparseTensorType", type(x))
 
 
 as_sparse = as_sparse_variable
@@ -198,10 +200,10 @@ def constant(x, name=None):
         raise TypeError("sparse.constant must be called on a " "scipy.sparse.spmatrix")
     try:
         return SparseConstant(
-            SparseType(format=x.format, dtype=x.dtype), x.copy(), name=name
+            SparseTensorType(format=x.format, dtype=x.dtype), x.copy(), name=name
         )
     except TypeError:
-        raise TypeError(f"Could not convert {x} to SparseType", type(x))
+        raise TypeError(f"Could not convert {x} to SparseTensorType", type(x))
 
 
 def sp_ones_like(x):
@@ -259,7 +261,7 @@ def to_dense(self, *args, **kwargs):
     self = self.toarray()
     new_args = [
         arg.toarray()
-        if hasattr(arg, "type") and isinstance(arg.type, SparseType)
+        if hasattr(arg, "type") and isinstance(arg.type, SparseTensorType)
         else arg
         for arg in args
     ]
@@ -503,15 +505,15 @@ def __repr__(self):
         return str(self)
 
 
-SparseType.variable_type = SparseVariable
-SparseType.constant_type = SparseConstant
+SparseTensorType.variable_type = SparseVariable
+SparseTensorType.constant_type = SparseConstant
 
 
-# for more dtypes, call SparseType(format, dtype)
+# for more dtypes, call SparseTensorType(format, dtype)
 def matrix(format, name=None, dtype=None):
     if dtype is None:
         dtype = config.floatX
-    type = SparseType(format=format, dtype=dtype)
+    type = SparseTensorType(format=format, dtype=dtype)
     return type(name)
@@ -527,15 +529,15 @@ def bsr_matrix(name=None, dtype=None):
     return matrix("bsr", name, dtype)
 
 
-# for more dtypes, call SparseType(format, dtype)
-csc_dmatrix = SparseType(format="csc", dtype="float64")
-csr_dmatrix = SparseType(format="csr", dtype="float64")
-bsr_dmatrix = SparseType(format="bsr", dtype="float64")
-csc_fmatrix = SparseType(format="csc", dtype="float32")
-csr_fmatrix = SparseType(format="csr", dtype="float32")
-bsr_fmatrix = SparseType(format="bsr", dtype="float32")
+# for more dtypes, call SparseTensorType(format, dtype)
+csc_dmatrix = SparseTensorType(format="csc", dtype="float64")
+csr_dmatrix = SparseTensorType(format="csr", dtype="float64")
+bsr_dmatrix = SparseTensorType(format="bsr", dtype="float64")
+csc_fmatrix = SparseTensorType(format="csc", dtype="float32")
+csr_fmatrix = SparseTensorType(format="csr", dtype="float32")
+bsr_fmatrix = SparseTensorType(format="bsr", dtype="float32")
 
-all_dtypes = list(SparseType.dtype_specs_map.keys())
+all_dtypes = list(SparseTensorType.dtype_specs_map.keys())
 complex_dtypes = [t for t in all_dtypes if t[:7] == "complex"]
 float_dtypes = [t for t in all_dtypes if t[:5] == "float"]
 int_dtypes = [t for t in all_dtypes if t[:3] == "int"]
@@ -725,7 +727,7 @@ def make_node(self, data, indices, indptr, shape):
         return Apply(
             self,
             [data, indices, indptr, shape],
-            [SparseType(dtype=data.type.dtype, format=self.format)()],
+            [SparseTensorType(dtype=data.type.dtype, format=self.format)()],
         )
 
     def perform(self, node, inputs, outputs):
@@ -931,7 +933,9 @@ def __init__(self, out_type):
     def make_node(self, x):
         x = as_sparse_variable(x)
         assert x.format in ("csr", "csc")
-        return Apply(self, [x], [SparseType(dtype=self.out_type, format=x.format)()])
+        return Apply(
+            self, [x], [SparseTensorType(dtype=self.out_type, format=x.format)()]
+        )
 
     def perform(self, node, inputs, outputs):
         (x,) = inputs
@@ -1014,7 +1018,7 @@ def __str__(self):
         return f"{self.__class__.__name__}{{structured_grad={self.sparse_grad}}}"
 
     def __call__(self, x):
-        if not isinstance(x.type, SparseType):
+        if not isinstance(x.type, SparseTensorType):
             return x
 
         return super().__call__(x)
@@ -1097,7 +1101,7 @@ def __str__(self):
         return f"{self.__class__.__name__}{{{self.format}}}"
 
     def __call__(self, x):
-        if isinstance(x.type, SparseType):
+        if isinstance(x.type, SparseTensorType):
             return x
 
         return super().__call__(x)
@@ -1116,12 +1120,14 @@ def make_node(self, x):
         else:
             assert x.ndim == 2
 
-        return Apply(self, [x], [SparseType(dtype=x.type.dtype, format=self.format)()])
+        return Apply(
+            self, [x], [SparseTensorType(dtype=x.type.dtype, format=self.format)()]
+        )
 
     def perform(self, node, inputs, outputs):
         (x,) = inputs
         (out,) = outputs
-        out[0] = SparseType.format_cls[self.format](x)
+        out[0] = SparseTensorType.format_cls[self.format](x)
 
     def grad(self, inputs, gout):
         (x,) = inputs
@@ -1585,7 +1591,11 @@ def make_node(self, x):
         return Apply(
             self,
             [x],
-            [SparseType(dtype=x.type.dtype, format=self.format_map[x.type.format])()],
+            [
+                SparseTensorType(
+                    dtype=x.type.dtype, format=self.format_map[x.type.format]
+                )()
+            ],
         )
 
     def perform(self, node, inputs, outputs):
@@ -2002,7 +2012,7 @@ def make_node(self, diag):
         if diag.type.ndim != 1:
             raise TypeError("data argument must be a vector", diag.type)
 
-        return Apply(self, [diag], [SparseType(dtype=diag.dtype, format="csc")()])
+        return Apply(self, [diag], [SparseTensorType(dtype=diag.dtype, format="csc")()])
 
     def perform(self, node, inputs, outputs):
         (z,) = outputs
@@ -2146,7 +2156,7 @@ def make_node(self, x, y):
         assert y.format in ("csr", "csc")
         out_dtype = aes.upcast(x.type.dtype, y.type.dtype)
         return Apply(
-            self, [x, y], [SparseType(dtype=out_dtype, format=x.type.format)()]
+            self, [x, y], [SparseTensorType(dtype=out_dtype, format=x.type.format)()]
         )
 
     def perform(self, node, inputs, outputs):
@@ -2183,7 +2193,7 @@ def make_node(self, x, y):
         if x.type.format != y.type.format:
             raise NotImplementedError()
         return Apply(
-            self, [x, y], [SparseType(dtype=x.type.dtype, format=x.type.format)()]
+            self, [x, y], [SparseTensorType(dtype=x.type.dtype, format=x.type.format)()]
         )
 
     def perform(self, node, inputs, outputs):
@@ -2286,7 +2296,7 @@ def make_node(self, x, y):
         if x.type.dtype != y.type.dtype:
             raise NotImplementedError()
         return Apply(
-            self, [x, y], [SparseType(dtype=x.type.dtype, format=x.type.format)()]
+            self, [x, y], [SparseTensorType(dtype=x.type.dtype, format=x.type.format)()]
         )
 
     def perform(self, node, inputs, outputs):
@@ -2426,7 +2436,7 @@ def make_node(self, x, y):
         assert y.format in ("csr", "csc")
         out_dtype = aes.upcast(x.type.dtype, y.type.dtype)
         return Apply(
-            self, [x, y], [SparseType(dtype=out_dtype, format=x.type.format)()]
+            self, [x, y], [SparseTensorType(dtype=out_dtype, format=x.type.format)()]
         )
 
     def perform(self, node, inputs, outputs):
@@ -2469,7 +2479,7 @@ def make_node(self, x, y):
         # Broadcasting of the sparse matrix is not supported.
         # We support nd == 0 used by grad of SpSum()
         assert y.type.ndim in (0, 2)
-        out = SparseType(dtype=dtype, format=x.type.format)()
+        out = SparseTensorType(dtype=dtype, format=x.type.format)()
         return Apply(self, [x, y], [out])
 
     def perform(self, node, inputs, outputs):
@@ -2559,7 +2569,7 @@ def make_node(self, x, y):
                 f"Got {x.type.dtype} and {y.type.dtype}."
             )
         return Apply(
-            self, [x, y], [SparseType(dtype=x.type.dtype, format=x.type.format)()]
+            self, [x, y], [SparseTensorType(dtype=x.type.dtype, format=x.type.format)()]
         )
 
     def perform(self, node, inputs, outputs):
@@ -2694,7 +2704,9 @@ def make_node(self, x, y):
         if x.type.format != y.type.format:
             raise NotImplementedError()
 
-        return Apply(self, [x, y], [SparseType(dtype="uint8", format=x.type.format)()])
+        return Apply(
+            self, [x, y], [SparseTensorType(dtype="uint8", format=x.type.format)()]
+        )
 
     def perform(self, node, inputs, outputs):
         (x, y) = inputs
@@ -3050,7 +3062,9 @@ def make_node(self, *mat):
         for x in var:
             assert x.format in ("csr", "csc")
 
-        return Apply(self, var, [SparseType(dtype=self.dtype, format=self.format)()])
+        return Apply(
+            self, var, [SparseTensorType(dtype=self.dtype, format=self.format)()]
+        )
 
     def perform(self, node, block, outputs):
         (out,) = outputs
@@ -3578,7 +3592,7 @@ def make_node(self, x, y):
             raise NotImplementedError()
 
         inputs = [x, y]  # Need to convert? e.g. assparse
-        outputs = [SparseType(dtype=x.type.dtype, format=myformat)()]
+        outputs = [SparseTensorType(dtype=x.type.dtype, format=myformat)()]
         return Apply(self, inputs, outputs)
 
     def perform(self, node, inp, out_):
@@ -3702,7 +3716,7 @@ def make_node(self, a, b):
             raise NotImplementedError("non-matrix b")
 
         if _is_sparse_variable(b):
-            return Apply(self, [a, b], [SparseType(a.type.format, dtype_out)()])
+            return Apply(self, [a, b], [SparseTensorType(a.type.format, dtype_out)()])
         else:
             return Apply(
                 self,
@@ -3719,7 +3733,7 @@ def perform(self, node, inputs, outputs):
             )
 
         variable = a * b
-        if isinstance(node.outputs[0].type, SparseType):
+        if isinstance(node.outputs[0].type, SparseTensorType):
             assert _is_sparse(variable)
             out[0] = variable
             return
diff --git a/aesara/sparse/sandbox/sp2.py b/aesara/sparse/sandbox/sp2.py
index 95851887c4..e86db84ae4 100644
--- a/aesara/sparse/sandbox/sp2.py
+++ b/aesara/sparse/sandbox/sp2.py
@@ -7,7 +7,7 @@
 from aesara.graph.op import Op
 from aesara.sparse.basic import (
     Remove0,
-    SparseType,
+    SparseTensorType,
     _is_sparse,
     as_sparse_variable,
     remove0,
@@ -108,7 +108,9 @@ def make_node(self, n, p, shape):
         assert shape.dtype in discrete_dtypes
 
         return Apply(
-            self, [n, p, shape], [SparseType(dtype=self.dtype, format=self.format)()]
+            self,
+            [n, p, shape],
+            [SparseTensorType(dtype=self.dtype, format=self.format)()],
         )
 
     def perform(self, node, inputs, outputs):
diff --git a/aesara/sparse/sharedvar.py b/aesara/sparse/sharedvar.py
index eb512bd678..d0a681ec96 100644
--- a/aesara/sparse/sharedvar.py
+++ b/aesara/sparse/sharedvar.py
@@ -3,7 +3,7 @@
 import scipy.sparse
 
 from aesara.compile import SharedVariable, shared_constructor
-from aesara.sparse.basic import SparseType, _sparse_py_operators
+from aesara.sparse.basic import SparseTensorType, _sparse_py_operators
 
 
 class SparseTensorSharedVariable(_sparse_py_operators, SharedVariable):
@@ -16,7 +16,7 @@ def sparse_constructor(
     value, name=None, strict=False, allow_downcast=None, borrow=False, format=None
 ):
     """
-    SharedVariable Constructor for SparseType.
+    SharedVariable Constructor for SparseTensorType.
 
     writeme
@@ -29,7 +29,7 @@ def sparse_constructor(
     if format is None:
         format = value.format
-    type = SparseType(format=format, dtype=value.dtype)
+    type = SparseTensorType(format=format, dtype=value.dtype)
     if not borrow:
         value = copy.deepcopy(value)
     return SparseTensorSharedVariable(
diff --git a/aesara/sparse/type.py b/aesara/sparse/type.py
index 771a445450..8812b76c98 100644
--- a/aesara/sparse/type.py
+++ b/aesara/sparse/type.py
@@ -25,9 +25,8 @@ def _is_sparse(x):
     return isinstance(x, scipy.sparse.spmatrix)
 
 
-class SparseType(TensorType, HasDataType):
-    """
-    Fundamental way to create a sparse node.
+class SparseTensorType(TensorType, HasDataType):
+    """A `Type` for sparse tensors.
 
     Parameters
     ----------
@@ -42,8 +41,7 @@ class SparseTensorType(TensorType, HasDataType):
 
     Notes
     -----
-    As far as I can tell, L{scipy.sparse} objects must be matrices, i.e.
-    have dimension 2.
+    Currently, sparse tensors can only be matrices (i.e. have two dimensions).
 
     """
@@ -126,15 +124,13 @@ def filter(self, value, strict=False, allow_downcast=None):
                 raise NotImplementedError()
         return sp
 
-    @staticmethod
-    def may_share_memory(a, b):
-        # This is Fred suggestion for a quick and dirty way of checking
-        # aliasing .. this can potentially be further refined (ticket #374)
+    @classmethod
+    def may_share_memory(cls, a, b):
         if _is_sparse(a) and _is_sparse(b):
             return (
-                SparseType.may_share_memory(a, b.data)
-                or SparseType.may_share_memory(a, b.indices)
-                or SparseType.may_share_memory(a, b.indptr)
+                cls.may_share_memory(a, b.data)
+                or cls.may_share_memory(a, b.indices)
+                or cls.may_share_memory(a, b.indptr)
             )
         if _is_sparse(b) and isinstance(a, np.ndarray):
             a, b = b, a
@@ -151,7 +147,7 @@ def may_share_memory(a, b):
     def convert_variable(self, var):
         res = super().convert_variable(var)
 
-        if res and not isinstance(res.type, SparseType):
+        if res and not isinstance(res.type, type(self)):
             # TODO: Convert to this sparse format
             raise NotImplementedError()
@@ -232,9 +228,8 @@ def is_super(self, otype):
         return False
 
 
-# Register SparseType's C code for ViewOp.
 aesara.compile.register_view_op_c_code(
-    SparseType,
+    SparseTensorType,
     """
     Py_XDECREF(%(oname)s);
     %(oname)s = %(iname)s;
@@ -242,3 +237,6 @@ def is_super(self, otype):
     """,
     1,
 )
+
+# This is a deprecated alias used for (temporary) backward-compatibility
+SparseType = SparseTensorType
diff --git a/aesara/tensor/basic.py b/aesara/tensor/basic.py
index a227a02446..468c332849 100644
--- a/aesara/tensor/basic.py
+++ b/aesara/tensor/basic.py
@@ -314,9 +314,9 @@ def get_scalar_constant_value(
         except ValueError:
             raise NotScalarConstantError()
 
-        from aesara.sparse.type import SparseType
+        from aesara.sparse.type import SparseTensorType
 
-        if isinstance(v.type, SparseType):
+        if isinstance(v.type, SparseTensorType):
             raise NotScalarConstantError()
 
         return data
diff --git a/doc/extending/other_ops.rst b/doc/extending/other_ops.rst
index 85c36be915..8965912ab1 100644
--- a/doc/extending/other_ops.rst
+++ b/doc/extending/other_ops.rst
@@ -44,7 +44,7 @@ usual dense tensors. In particular, in the
 instead of ``as_tensor_variable(x)``.
 
 Another difference is that you need to use ``SparseVariable`` and
-``SparseType`` instead of ``TensorVariable`` and ``TensorType``.
+``SparseTensorType`` instead of ``TensorVariable`` and ``TensorType``.
 
 Do not forget that we support only sparse matrices (so only 2 dimensions)
 and (like in SciPy) they do not support broadcasting operations by default
@@ -55,7 +55,7 @@ you can create output variables like this:
 
 .. code-block:: python
 
     out_format = inputs[0].format  # or 'csr' or 'csc' if the output format is fixed
-    SparseType(dtype=inputs[0].dtype, format=out_format).make_variable()
+    SparseTensorType(dtype=inputs[0].dtype, format=out_format).make_variable()
 
 See the sparse :class:`Aesara.sparse.basic.Cast` `Op` code for a good example of
 a sparse `Op` with Python code.
@@ -226,7 +226,7 @@ along with pointers to the relevant documentation.
   primitive type. The C type associated with this Aesara type is the
   represented C primitive itself.
 
-* :ref:`SparseType ` : Aesara `Type` used to represent sparse
+* :ref:`SparseTensorType ` : Aesara `Type` used to represent sparse
   tensors. There is no equivalent C type for this Aesara `Type` but you can
   split a sparse variable into its parts as TensorVariables. Those can then
   be used as inputs to an op with C code.
diff --git a/tests/compile/function/test_pfunc.py b/tests/compile/function/test_pfunc.py
index 2aeb7477bd..6cb0e40e76 100644
--- a/tests/compile/function/test_pfunc.py
+++ b/tests/compile/function/test_pfunc.py
@@ -751,8 +751,8 @@ def test_sparse_input_aliasing_affecting_inplace_operations(self):
         # operations are used) and to break the elemwise composition
         # with some non-elemwise op (here dot)
 
-        x = sparse.SparseType("csc", dtype="float64")()
-        y = sparse.SparseType("csc", dtype="float64")()
+        x = sparse.SparseTensorType("csc", dtype="float64")()
+        y = sparse.SparseTensorType("csc", dtype="float64")()
         f = function([In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y))
 
         # Test 1. If the same variable is given twice
diff --git a/tests/sparse/test_basic.py b/tests/sparse/test_basic.py
index 2a1d6dbe5c..e0f967ea09 100644
--- a/tests/sparse/test_basic.py
+++ b/tests/sparse/test_basic.py
@@ -38,7 +38,7 @@
     Remove0,
     SamplingDot,
     SparseFromDense,
-    SparseType,
+    SparseTensorType,
     SquareDiagonal,
     StructuredDot,
     StructuredDotGradCSC,
@@ -413,7 +413,7 @@ def test_getitem_2d(self):
         pass
 
     def test_getitem_scalar(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x],
             [x[2, 2]],
@@ -451,7 +451,7 @@ def test_csm_grad(self):
         )
 
     def test_transpose(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x],
             [x.T],
@@ -460,7 +460,7 @@ def test_transpose(self):
         )
 
     def test_neg(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x],
             [-x],
@@ -469,8 +469,8 @@ def test_neg(self):
         )
 
     def test_add_ss(self):
-        x = SparseType("csr", dtype=config.floatX)()
-        y = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
+        y = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x, y],
             [x + y],
@@ -482,7 +482,7 @@ def test_add_ss(self):
         )
 
     def test_add_sd(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         y = matrix()
         self._compile_and_check(
             [x, y],
@@ -495,8 +495,8 @@ def test_add_sd(self):
         )
 
     def test_mul_ss(self):
-        x = SparseType("csr", dtype=config.floatX)()
-        y = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
+        y = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x, y],
             [x * y],
@@ -508,7 +508,7 @@ def test_mul_ss(self):
         )

     def test_mul_sd(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         y = matrix()
         self._compile_and_check(
             [x, y],
@@ -522,7 +522,7 @@ def test_mul_sd(self):
         )
 
     def test_remove0(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x],
             [Remove0()(x)],
@@ -531,8 +531,8 @@ def test_remove0(self):
         )
 
     def test_dot(self):
-        x = SparseType("csc", dtype=config.floatX)()
-        y = SparseType("csc", dtype=config.floatX)()
+        x = SparseTensorType("csc", dtype=config.floatX)()
+        y = SparseTensorType("csc", dtype=config.floatX)()
         self._compile_and_check(
             [x, y],
             [Dot()(x, y)],
@@ -545,12 +545,12 @@ def test_dot(self):
 
     def test_dot_broadcast(self):
         for x, y in [
-            (SparseType("csr", "float32")(), vector()[:, None]),
-            (SparseType("csr", "float32")(), vector()[None, :]),
-            (SparseType("csr", "float32")(), matrix()),
-            (vector()[:, None], SparseType("csr", "float32")()),
-            (vector()[None, :], SparseType("csr", "float32")()),
-            (matrix(), SparseType("csr", "float32")()),
+            (SparseTensorType("csr", "float32")(), vector()[:, None]),
+            (SparseTensorType("csr", "float32")(), vector()[None, :]),
+            (SparseTensorType("csr", "float32")(), matrix()),
+            (vector()[:, None], SparseTensorType("csr", "float32")()),
+            (vector()[None, :], SparseTensorType("csr", "float32")()),
+            (matrix(), SparseTensorType("csr", "float32")()),
         ]:
 
             sparse_out = at.dot(x, y)
@@ -562,8 +562,8 @@ def test_dot_broadcast(self):
         assert dense_out.broadcastable == sparse_out.broadcastable
 
     def test_structured_dot(self):
-        x = SparseType("csc", dtype=config.floatX)()
-        y = SparseType("csc", dtype=config.floatX)()
+        x = SparseTensorType("csc", dtype=config.floatX)()
+        y = SparseTensorType("csc", dtype=config.floatX)()
         self._compile_and_check(
             [x, y],
             [structured_dot(x, y)],
@@ -583,8 +583,8 @@ def test_structured_dot_grad(self):
             ("csc", StructuredDotGradCSC),
             ("csr", StructuredDotGradCSR),
         ]:
-            x = SparseType(format, dtype=config.floatX)()
-            y = SparseType(format, dtype=config.floatX)()
+            x = SparseTensorType(format, dtype=config.floatX)()
+            y = SparseTensorType(format, dtype=config.floatX)()
             grads = aesara.grad(dense_from_sparse(structured_dot(x, y)).sum(), [x, y])
             self._compile_and_check(
                 [x, y],
@@ -606,7 +606,7 @@ def test_structured_dot_grad(self):
             )
 
     def test_dense_from_sparse(self):
-        x = SparseType("csr", dtype=config.floatX)()
+        x = SparseTensorType("csr", dtype=config.floatX)()
         self._compile_and_check(
             [x],
             [dense_from_sparse(x)],
@@ -1130,7 +1130,7 @@ def test_csm_properties(self):
 
         for format in ("csc", "csr"):
             for dtype in ("float32", "float64"):
-                x = SparseType(format, dtype=dtype)()
+                x = SparseTensorType(format, dtype=dtype)()
                 f = aesara.function([x], csm_properties(x))
 
                 spmat = sp_types[format](random_lil((4, 3), dtype, 3))
@@ -1288,7 +1288,7 @@ def test_upcast(self):
         for dense_dtype in typenames:
             for sparse_dtype in typenames:
                 correct_dtype = aesara.scalar.upcast(sparse_dtype, dense_dtype)
-                a = SparseType("csc", dtype=sparse_dtype)()
+                a = SparseTensorType("csc", dtype=sparse_dtype)()
                 b = matrix(dtype=dense_dtype)
                 d = structured_dot(a, b)
                 assert d.type.dtype == correct_dtype
@@ -1375,8 +1375,8 @@ def test_dot_sparse_sparse(self):
 
         for sparse_format_a in ["csc", "csr", "bsr"]:
             for sparse_format_b in ["csc", "csr", "bsr"]:
-                a = SparseType(sparse_format_a, dtype=sparse_dtype)()
-                b = SparseType(sparse_format_b, dtype=sparse_dtype)()
+                a = SparseTensorType(sparse_format_a, dtype=sparse_dtype)()
+                b = SparseTensorType(sparse_format_b, dtype=sparse_dtype)()
                 d = at.dot(a, b)
                 f = aesara.function([a, b], Out(d, borrow=True))
                 for M, N, K, nnz in [
@@ -1397,7 +1397,7 @@ def test_csc_correct_output_faster_than_scipy(self):
         sparse_dtype = "float64"
         dense_dtype = "float64"
 
-        a = SparseType("csc", dtype=sparse_dtype)()
+        a = SparseTensorType("csc", dtype=sparse_dtype)()
         b = matrix(dtype=dense_dtype)
         d = at.dot(a, b)
         f = aesara.function([a, b], Out(d, borrow=True))
@@ -1445,7 +1445,7 @@ def test_csr_correct_output_faster_than_scipy(self):
         sparse_dtype = "float32"
         dense_dtype = "float32"
 
-        a = SparseType("csr", dtype=sparse_dtype)()
+        a = SparseTensorType("csr", dtype=sparse_dtype)()
         b = matrix(dtype=dense_dtype)
         d = at.dot(a, b)
         f = aesara.function([a, b], d)
@@ -1567,8 +1567,8 @@ def test_sparse_sparse(self):
             ("csr", "csc"),
             ("csr", "csr"),
         ]:
-            x = sparse.SparseType(format=x_f, dtype=d1)("x")
-            y = sparse.SparseType(format=x_f, dtype=d2)("x")
+            x = sparse.SparseTensorType(format=x_f, dtype=d1)("x")
+            y = sparse.SparseTensorType(format=x_f, dtype=d2)("x")
 
             def f_a(x, y):
                 return x * y
@@ -1886,7 +1886,7 @@ def test(self):
 
 def test_shape_i():
     sparse_dtype = "float32"
 
-    a = SparseType("csr", dtype=sparse_dtype)()
+    a = SparseTensorType("csr", dtype=sparse_dtype)()
     f = aesara.function([a], a.shape[1])
     assert f(sp.sparse.csr_matrix(random_lil((100, 10), sparse_dtype, 3))) == 10
@@ -1896,7 +1896,7 @@ def test_shape():
     # does not actually create a dense tensor in the process.
     sparse_dtype = "float32"
 
-    a = SparseType("csr", dtype=sparse_dtype)()
+    a = SparseTensorType("csr", dtype=sparse_dtype)()
     f = aesara.function([a], a.shape)
     assert np.all(
         f(sp.sparse.csr_matrix(random_lil((100, 10), sparse_dtype, 3))) == (100, 10)
@@ -1946,7 +1946,7 @@ def as_ar(a):
         (b.transpose(), a, False),
     ]:
 
-        assert SparseType.may_share_memory(a_, b_) == rep
+        assert SparseTensorType.may_share_memory(a_, b_) == rep
 
 
 def test_sparse_shared_memory():
@@ -1955,8 +1955,8 @@ def test_sparse_shared_memory():
     a = random_lil((3, 4), "float32", 3).tocsr()
     m1 = random_lil((4, 4), "float32", 3).tocsr()
     m2 = random_lil((4, 4), "float32", 3).tocsr()
-    x = SparseType("csr", dtype="float32")()
-    y = SparseType("csr", dtype="float32")()
+    x = SparseTensorType("csr", dtype="float32")()
+    y = SparseTensorType("csr", dtype="float32")()
 
     sdot = sparse.structured_dot
     z = sdot(x * 3, m1) + sdot(y * 2, m2)
@@ -1966,7 +1966,7 @@ def test_sparse_shared_memory():
     def f_(x, y, m1=m1, m2=m2):
         return ((x * 3) * m1) + ((y * 2) * m2)
 
-    assert SparseType.may_share_memory(a, a)  # This is trivial
+    assert SparseTensorType.may_share_memory(a, a)  # This is trivial
     result = f(a, a)
     result_ = f_(a, a)
     assert (result_.todense() == result.todense()).all()
@@ -3192,7 +3192,7 @@ def test_mul_s_v(self):
 
         for format in ("csr", "csc"):
            for dtype in ("float32", "float64"):
-                x = sparse.SparseType(format, dtype=dtype)()
+                x = sparse.SparseTensorType(format, dtype=dtype)()
                 y = vector(dtype=dtype)
 
                 f = aesara.function([x, y], mul_s_v(x, y))
@@ -3220,7 +3220,7 @@ def test_structured_add_s_v(self):
 
         for format in ("csr", "csc"):
            for dtype in ("float32", "float64"):
-                x = sparse.SparseType(format, dtype=dtype)()
+                x = sparse.SparseTensorType(format, dtype=dtype)()
                 y = vector(dtype=dtype)
 
                 f = aesara.function([x, y], structured_add_s_v(x, y))
diff --git a/tests/sparse/test_type.py b/tests/sparse/test_type.py
index 28ec9a758d..eb6ce331ce 100644
--- a/tests/sparse/test_type.py
+++ b/tests/sparse/test_type.py
@@ -1,12 +1,12 @@
 import pytest
 
 from aesara.sparse import matrix as sp_matrix
-from aesara.sparse.type import SparseType
+from aesara.sparse.type import SparseTensorType
 from aesara.tensor import dmatrix
 
 
 def test_clone():
-    st = SparseType("csr", "float64")
+    st = SparseTensorType("csr", "float64")
     assert st == st.clone()
diff --git a/tests/sparse/test_var.py b/tests/sparse/test_var.py
index 04f571eb74..df2e04cbf8 100644
--- a/tests/sparse/test_var.py
+++ b/tests/sparse/test_var.py
@@ -7,7 +7,7 @@
 import aesara
 import aesara.sparse as sparse
 import aesara.tensor as at
-from aesara.sparse.type import SparseType
+from aesara.sparse.type import SparseTensorType
 from aesara.tensor.type import DenseTensorType
 
 
@@ -16,7 +16,7 @@ class TestSparseVariable:
         "method, exp_type, cm",
         [
             ("__abs__", DenseTensorType, None),
-            ("__neg__", SparseType, ExitStack()),
+            ("__neg__", SparseTensorType, ExitStack()),
             ("__ceil__", DenseTensorType, None),
             ("__floor__", DenseTensorType, None),
             ("__trunc__", DenseTensorType, None),
@@ -65,7 +65,7 @@ class TestSparseVariable:
             ("conj", DenseTensorType, None),
             ("round", DenseTensorType, None),
             ("trace", DenseTensorType, None),
-            ("zeros_like", SparseType, ExitStack()),
+            ("zeros_like", SparseTensorType, ExitStack()),
             ("ones_like", DenseTensorType, ExitStack()),
             ("cumsum", DenseTensorType, None),
             ("cumprod", DenseTensorType, None),
@@ -83,7 +83,7 @@ def test_unary(self, method, exp_type, cm):
         if cm is None:
             cm = pytest.warns(UserWarning, match=".*converted to dense.*")
 
-        if exp_type == SparseType:
+        if exp_type == SparseTensorType:
             exp_res_type = csr_matrix
         else:
             exp_res_type = np.ndarray
@@ -112,16 +112,16 @@ def test_unary(self, method, exp_type, cm):
     @pytest.mark.parametrize(
         "method, exp_type",
         [
-            ("__lt__", SparseType),
-            ("__le__", SparseType),
-            ("__gt__", SparseType),
-            ("__ge__", SparseType),
+            ("__lt__", SparseTensorType),
+            ("__le__", SparseTensorType),
+            ("__gt__", SparseTensorType),
+            ("__ge__", SparseTensorType),
             ("__and__", DenseTensorType),
             ("__or__", DenseTensorType),
             ("__xor__", DenseTensorType),
-            ("__add__", SparseType),
-            ("__sub__", SparseType),
-            ("__mul__", SparseType),
+            ("__add__", SparseTensorType),
+            ("__sub__", SparseTensorType),
+            ("__mul__", SparseTensorType),
             ("__pow__", DenseTensorType),
             ("__mod__", DenseTensorType),
             ("__divmod__", DenseTensorType),
@@ -137,7 +137,7 @@ def test_binary(self, method, exp_type):
 
         method_to_call = getattr(x, method)
 
-        if exp_type == SparseType:
+        if exp_type == SparseTensorType:
             exp_res_type = csr_matrix
             cm = ExitStack()
         else:
@@ -198,7 +198,7 @@ def test_getitem(self):
         x = sparse.csr_from_dense(x)
 
         z = x[:, :2]
-        assert isinstance(z.type, SparseType)
+        assert isinstance(z.type, SparseTensorType)
 
         f = aesara.function([x], z)
         exp_res = f([[1.1, 0.0, 2.0], [-1.0, 0.0, 0.0]])
@@ -211,7 +211,7 @@ def test_dot(self):
         y = sparse.csr_from_dense(y)
 
         z = x.__dot__(y)
-        assert isinstance(z.type, SparseType)
+        assert isinstance(z.type, SparseTensorType)
 
         f = aesara.function([x, y], z)
         exp_res = f(
diff --git a/tests/typed_list/test_basic.py b/tests/typed_list/test_basic.py
index 52f7b81047..3192719ea8 100644
--- a/tests/typed_list/test_basic.py
+++ b/tests/typed_list/test_basic.py
@@ -451,7 +451,7 @@ def test_non_tensor_type(self):
     def test_sparse(self):
         sp = pytest.importorskip("scipy")
         mySymbolicSparseList = TypedListType(
-            sparse.SparseType("csr", aesara.config.floatX)
+            sparse.SparseTensorType("csr", aesara.config.floatX)
         )()
         mySymbolicSparse = sparse.csr_matrix()
@@ -519,7 +519,7 @@ def test_non_tensor_type(self):
     def test_sparse(self):
         sp = pytest.importorskip("scipy")
         mySymbolicSparseList = TypedListType(
-            sparse.SparseType("csr", aesara.config.floatX)
+            sparse.SparseTensorType("csr", aesara.config.floatX)
        )()
         mySymbolicSparse = sparse.csr_matrix()
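
Usage note: the last hunk of aesara/sparse/type.py keeps the old name importable
through the alias `SparseType = SparseTensorType`. The following is a minimal
sketch of what the rename means for downstream code; it is not part of the patch,
and it assumes an Aesara build that includes this change plus an installed SciPy:

    import numpy as np
    import scipy.sparse as sp

    import aesara
    import aesara.sparse as sparse
    from aesara.sparse.type import SparseType, SparseTensorType

    # The deprecated alias is the same class object, so existing imports
    # and isinstance() checks against SparseType keep working.
    assert SparseType is SparseTensorType

    # Build a symbolic CSR matrix under the new name and round-trip it
    # through dense_from_sparse, the same pattern the patched tests use.
    x = SparseTensorType(format="csr", dtype="float64")("x")
    f = aesara.function([x], sparse.dense_from_sparse(x))

    m = sp.csr_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))
    print(f(m))  # -> array([[0., 1.], [2., 0.]])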