@@ -560,7 +560,60 @@ class Tensor:
    def __array_ufunc__(
        self, ufunc: Type[np.ufunc], method: str, *inputs: ArrayLike, **kwargs
    ) -> Union["Tensor", np.ndarray]:
+        """An interface provided by NumPy to override the behavior of its ufuncs [1]_.
+
+        MyGrad implements its own ufuncs for all differentiable NumPy ufuncs.
+
+        Non-differentiable NumPy ufuncs simply get called on the underlying arrays of
+        tensors and will return ndarrays.
+
+        The differentiability, or lack thereof, of ufuncs may not be obvious to end users.
+        Thus potentially ambiguous ufuncs (e.g. ``numpy.ceil``) are made to raise on
+        non-constant tensors so that the lack of differentiability is obvious to the user.
+        This design decision is made in the same spirit as requiring that integer-dtype
+        tensors be constant.
+
+        References
+        ----------
+        .. [1] https://numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
+
+        Examples
+        --------
+        NumPy ufuncs that represent differentiable operations are overloaded by MyGrad
+        tensors so that they support backprop.
+
+        >>> import mygrad as mg
+        >>> import numpy as np
+
+        >>> x = mg.tensor([1., 2.])
+
+        This calls ``mygrad.sin`` under the hood.
+
+        >>> np.sin(x)  # returns a tensor
+        Tensor([0.84147098, 0.90929743])
+
+        >>> np.sin(x).backward()
+        >>> x.grad  # note: the derivative of sin(x) is cos(x)
+        array([ 0.54030231, -0.41614684])
+
+        Specifying a dtype, a ``where`` mask, and an in-place target (via ``out``, as an
+        array or a tensor) are all supported.
+
+        >>> x = mg.tensor([1., 2.])
+        >>> y = mg.tensor([-1., -1.])
+        >>> np.exp(x, where=[False, True], out=y)
+        Tensor([-1.       ,  7.3890561])
+        >>> y.backward()
+        >>> x.grad
+        array([0.       , 7.3890561])
+
+        Non-differentiable NumPy ufuncs simply operate on the ndarrays that are wrapped
+        by MyGrad tensors; these return ndarrays, which will appropriately and explicitly
+        serve as constants elsewhere in a computational graph.
+
+        >>> x = mg.tensor([1., 2.])
+        >>> np.less_equal(x, 1)
+        array([ True, False])
+        """
        out = kwargs.pop("out", (None,))
        if len(out) > 1:  # pragma: no cover
            raise ValueError(
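
For readers unfamiliar with the protocol this method implements, here is a minimal,
standalone sketch of NumPy's ``__array_ufunc__`` dispatch. The ``Boxed`` class and its
names are hypothetical stand-ins for ``Tensor``; the sketch only illustrates the
mechanics by which NumPy hands a ufunc call over to a method like the one above.

    import numpy as np

    class Boxed:
        """A hypothetical array-wrapper; stands in for mygrad's Tensor."""

        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # NumPy invokes this instead of operating on raw arrays whenever
            # a `Boxed` instance is among the ufunc's operands
            if method != "__call__":  # reductions etc. are not handled here
                return NotImplemented
            # unwrap Boxed operands down to their underlying ndarrays
            arrays = [i.data if isinstance(i, Boxed) else i for i in inputs]
            # a real implementation would dispatch to its own differentiable
            # op here; this sketch just re-wraps the ufunc's result
            return type(self)(ufunc(*arrays, **kwargs))

    # np.sin(Boxed([0.0, 1.0])) now routes through Boxed.__array_ufunc__
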
@@ -576,9 +629,11 @@ def __array_ufunc__(
        except KeyError:
            pass

+        # non-differentiable ufuncs get called on numpy arrays stored by tensors
        if ufunc in _REGISTERED_BOOL_ONLY_UFUNC:
            caster = asarray
        elif ufunc in _REGISTERED_CONST_ONLY_UFUNC:
+            # the presence of non-constant tensors will raise
            caster = _as_constant_array
        else:  # pragma: no cover
            return NotImplemented
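
As a usage note on the constant-only branch: per the docstring above, an ambiguous
ufunc such as ``numpy.ceil`` should raise when handed a non-constant tensor and return
an ndarray when every tensor involved is a constant. A hedged doctest-style sketch
(the exact exception type and message are not shown in this diff):

    >>> np.ceil(mg.tensor([1.5]))                 # non-constant: raises
    Traceback (most recent call last):
    ...
    >>> np.ceil(mg.tensor([1.5], constant=True))  # constant: returns an ndarray
    array([2.])
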
@@ -1802,21 +1857,11 @@ def __truediv__(self, other: ArrayLike) -> "Tensor":
    def __rtruediv__(self, other: ArrayLike) -> "Tensor":
        return self._op(Divide, other, self)

-    def __floordiv__(self, other: ArrayLike) -> "Tensor":
-        if not self.constant:
-            raise ValueError(
-                "Floor division cannot involve non-constant mygrad tensors."
-            )
-        if isinstance(other, Tensor):
-            other = other.data
-        return type(self)(self.data.__floordiv__(other), constant=True)
+    def __floordiv__(self, other: ArrayLike) -> np.ndarray:
+        return np.floor_divide(self, other)

-    def __rfloordiv__(self, other: ArrayLike) -> "Tensor":
-        if not self.constant:
-            raise ValueError(
-                "Floor division cannot involve non-constant mygrad tensors."
-            )
-        return type(self)(self.data.__rfloordiv__(other), constant=True)
+    def __rfloordiv__(self, other: ArrayLike) -> np.ndarray:
+        return np.floor_divide(other, self)

    def __itruediv__(self, other: ArrayLike) -> "Tensor":
        self._in_place_op(Divide, self, other)
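
A brief usage sketch of the new floor-division behavior, assuming ``np.floor_divide``
is registered as a constant-only ufunc (consistent with the guard removed above): both
operators now route through ``__array_ufunc__`` and yield an ndarray rather than a
constant Tensor.

    >>> x = mg.tensor([3., 7.], constant=True)
    >>> x // 2   # delegates to np.floor_divide
    array([1., 3.])
    >>> 10 // x  # __rfloordiv__ swaps the operands
    array([3., 1.])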