diff --git a/doc/library/compile/io.rst b/doc/library/compile/io.rst index 272d4754db..a62f69fdb9 100644 --- a/doc/library/compile/io.rst +++ b/doc/library/compile/io.rst @@ -189,9 +189,9 @@ To show some examples of these access methods... >>> fn = function([a, b, ((c, c+a+b), 10.0)], []) >>> # the value associated with c is accessible in 3 ways ->>> fn['s'] is fn.value[c] +>>> fn['s'] is fn.data[c] True ->>> fn['s'] is fn.container[c].value +>>> fn['s'] is fn.container[c].data True >>> fn['s'] @@ -205,14 +205,14 @@ array(13.0) [] >>> fn['s'] array(100.0) ->>> fn.value[c] = 99.0 +>>> fn.data[c] = 99.0 >>> fn(1,0) [] >>> fn['s'] array(100.0) ->>> fn['s'] == fn.value[c] +>>> fn['s'] == fn.data[c] True ->>> fn['s'] == fn.container[c].value +>>> fn['s'] == fn.container[c].data True diff --git a/doc/library/tensor/basic.rst b/doc/library/tensor/basic.rst index 4f087b6788..5a2fb6cb3a 100644 --- a/doc/library/tensor/basic.rst +++ b/doc/library/tensor/basic.rst @@ -287,10 +287,10 @@ precise) is by calling :func:`pytensor.shared` x = pytensor.shared(np.random.standard_normal((3, 4))) -This will return a :term:`shared variable ` whose ``.value`` is +This will return a :term:`shared variable ` whose ``.data`` is a NumPy `ndarray`. The number of dimensions and dtype of the `Variable` are inferred from the `ndarray` argument. The argument to `shared` *will not be -copied*, and subsequent changes will be reflected in ``x.value``. +copied*, and subsequent changes will be reflected in ``x.data``. For additional information, see the :func:`shared() ` documentation. diff --git a/pytensor/compile/function/types.py b/pytensor/compile/function/types.py index 9cc85f3d24..4774612de7 100644 --- a/pytensor/compile/function/types.py +++ b/pytensor/compile/function/types.py @@ -508,7 +508,7 @@ def __init__( # there is no need to refeed the default value. 
assert not refeed else: - c.value = value + c.data = value c.required = required c.implicit = input.implicit # this is a count of how many times the input has been @@ -531,7 +531,7 @@ def __init__( self.inv_finder = inv_finder # this class is important in overriding the square-bracket notation: - # fn.value[x] + # fn.data[x] # self reference is available via the closure on the class class ValueAttribute: def __getitem__(self, item): @@ -546,7 +546,7 @@ def __getitem__(self, item): "for duplicates." ) if isinstance(s, Container): - return s.value + return s.data else: raise NotImplementedError @@ -564,7 +564,7 @@ def __setitem__(self, item, value): "for duplicates." ) if isinstance(s, Container): - s.value = value + s.data = value s.provided += 1 else: s(value) @@ -1624,11 +1624,11 @@ def __init__( self.name = name self.trust_input = trust_input - self.required = [(i.value is None) for i in self.inputs] + self.required = [(i.data is None) for i in self.inputs] self.refeed = [ ( - i.value is not None - and not isinstance(i.value, Container) + i.data is not None + and not isinstance(i.data, Container) and i.update is None ) for i in self.inputs @@ -1898,10 +1898,10 @@ def convert_function_input(input): if len(input) == 1: return input[0] elif len(input) == 2: - input, value = input + input, data = input if name is not None: input.name = name - input.value = value + input.data = data return input else: raise TypeError(f"The input specification is not valid: {input}") diff --git a/pytensor/graph/basic.py b/pytensor/graph/basic.py index 512f0ef3ab..23e76b15f1 100644 --- a/pytensor/graph/basic.py +++ b/pytensor/graph/basic.py @@ -827,10 +827,6 @@ def owner(self, value) -> None: if value is not None: raise ValueError("Constant instances cannot have an owner.") - @property - def value(self): - return self.data - def walk( nodes: Iterable[T], diff --git a/pytensor/link/numba/dispatch/sparse.py b/pytensor/link/numba/dispatch/sparse.py index e25083e92d..0affe9c036 100644 --- 
a/pytensor/link/numba/dispatch/sparse.py +++ b/pytensor/link/numba/dispatch/sparse.py @@ -108,10 +108,10 @@ def unbox_matrix(typ, obj, c): indptr = c.pyapi.object_getattr_string(obj, "indptr") shape = c.pyapi.object_getattr_string(obj, "shape") - struct_ptr.data = c.unbox(typ.data, data).value - struct_ptr.indices = c.unbox(typ.indices, indices).value - struct_ptr.indptr = c.unbox(typ.indptr, indptr).value - struct_ptr.shape = c.unbox(typ.shape, shape).value + struct_ptr.data = c.unbox(typ.data, data).value + struct_ptr.indices = c.unbox(typ.indices, indices).value + struct_ptr.indptr = c.unbox(typ.indptr, indptr).value + struct_ptr.shape = c.unbox(typ.shape, shape).value c.pyapi.decref(data) c.pyapi.decref(indices) diff --git a/pytensor/scan/rewriting.py b/pytensor/scan/rewriting.py index 2ba282d8d6..80ed10eb9f 100644 --- a/pytensor/scan/rewriting.py +++ b/pytensor/scan/rewriting.py @@ -1442,7 +1442,7 @@ def scan_save_mem(fgraph, node): if ( i <= op.info.n_mit_mot and isinstance(this_slice[0], ScalarConstant) - and this_slice[0].value == -1 + and this_slice[0].data == -1 ): start = nw_steps - 1 else: @@ -1728,7 +1728,7 @@ def scan_save_mem(fgraph, node): # Special case when only last value is requested if ( isinstance(old_slices[0], ScalarConstant) - and old_slices[0].value == -1 + and old_slices[0].data == -1 ): position = old_slices[0] else: diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py index 7a1bc75b0b..fee0a4a64b 100644 --- a/pytensor/tensor/extra_ops.py +++ b/pytensor/tensor/extra_ops.py @@ -1564,7 +1564,7 @@ def broadcast_shape_iter( (one,) * (max_dims - len(a)) + tuple( one - if sh == 1 or isinstance(sh, Constant) and sh.value == 1 + if sh == 1 or isinstance(sh, Constant) and sh.data == 1 else (ps.as_scalar(sh) if not isinstance(sh, Variable) else sh) for sh in a ) @@ -1603,7 +1603,7 @@ def broadcast_shape_iter( const_nb_shapes: set[Variable] = set() for shape in non_bcast_shapes: if isinstance(shape, Constant): - 
const_nb_shapes.add(shape.value.item()) + const_nb_shapes.add(shape.data.item()) else: nonconst_nb_shapes.add(shape) diff --git a/pytensor/tensor/rewriting/math.py b/pytensor/tensor/rewriting/math.py index 9694a022e3..21e9e6c2e8 100644 --- a/pytensor/tensor/rewriting/math.py +++ b/pytensor/tensor/rewriting/math.py @@ -2016,7 +2016,7 @@ def local_mul_to_sqr(fgraph, node): @node_rewriter([int_div]) def local_intdiv_by_one(fgraph, node): """x // 1 -> x""" - if isinstance(node.inputs[1], TensorConstant) and np.all(node.inputs[1].value == 1): + if isinstance(node.inputs[1], TensorConstant) and np.all(node.inputs[1].data == 1): return [node.inputs[0].astype(node.outputs[0].dtype)]