autograd

FAQ: _saved_self, _saved_other, _saved_result, _saved_exponent

A grad_fn node saves the tensors it needs for the backward pass, and these can be inspected through attributes such as _saved_self, _saved_other, _saved_result, and _saved_exponent. The examples below show which tensors a few common ops save, and whether the saved tensor is the same Python object as the original.

  1. x * 2, multiplying by a scalar

import torch

x = torch.tensor([1.2, 3], requires_grad=True)
y = x * 2
print(y.grad_fn.__class__)     # <class 'MulBackward0'>
print(y.grad_fn._saved_self)   # None
print(y.grad_fn._saved_other)  # tensor(2)
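
Why is _saved_self None here? The gradient of x * 2 with respect to x is the constant 2, so backward only needs the scalar, not x. A quick check, reusing x and y from the snippet above:

y.sum().backward()
print(x.grad)  # tensor([2., 2.])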
  2. x * a, multiplying by a tensor that requires grad

x = torch.tensor([1.2, 3], requires_grad=True)
a = torch.tensor([10.2], requires_grad=True)
y = x * a
print(y.grad_fn.__class__)     # <class 'MulBackward0'>
print(y.grad_fn._saved_self)   # tensor([1.2000, 3.0000], requires_grad=True)
print(y.grad_fn._saved_other)  # tensor([10.2000], requires_grad=True)

print(y.grad_fn._saved_self is x)   # True
print(y.grad_fn._saved_other is a)  # True
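
Backward now needs both operands: d(x*a)/dx = a and d(x*a)/da = x, which is why both are saved. A quick check, reusing x, a, and y from above (a has shape (1,), so it is broadcast over x):

y.sum().backward()
print(x.grad)  # tensor([10.2000, 10.2000])  -> a, broadcast to x's shape
print(a.grad)  # tensor([4.2000])            -> x summed over the broadcast dim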
  3. x * a, multiplying by a tensor that does not require grad

x = torch.tensor([1.2, 3], requires_grad=True)
a = torch.tensor([10.2])
y = x * a
print(y.grad_fn.__class__)     # <class 'MulBackward0'>
print(y.grad_fn._saved_self)   # None
print(y.grad_fn._saved_other)  # tensor([10.2000])

print(y.grad_fn._saved_other is a)  # True
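
Since a does not require grad, only the gradient with respect to x is needed, and that only depends on a, so x itself is not saved. Reusing the tensors from the snippet above:

y.sum().backward()
print(x.grad)  # tensor([10.2000, 10.2000])
print(a.grad)  # None, since a does not require grad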
  4. x.sin()

x = torch.tensor([1.2, 3], requires_grad=True)
y = x.sin()
print(y.grad_fn.__class__)  # <class 'SinBackward0'>
print(y.grad_fn._saved_self is x)  # True
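
sin saves its input because d sin(x)/dx = cos(x), which must be evaluated at x. A small check, reusing x and y from above:

y.sum().backward()
print(torch.allclose(x.grad, x.detach().cos()))  # True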
  5. x.relu()

x = torch.tensor([1.2, 3], requires_grad=True)
y = x.relu()
print(y.grad_fn.__class__)  # <class 'ReluBackward0'>
print(y.grad_fn._saved_result.equal(y))  # True
print(y.grad_fn._saved_result is y)      # False
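
relu saves its output rather than its input: the derivative is 1 where the output is positive and 0 elsewhere, so the result is enough. The saved result is not y itself because a tensor that is the output of its own grad_fn is packed and unpacked into a fresh Tensor object (this avoids the reference cycle y -> grad_fn -> y), but the two still share storage. A sketch of checking that, reusing y from above:

print(y.grad_fn._saved_result.data_ptr() == y.data_ptr())  # True: same underlying storage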
  6. x.log()

x = torch.tensor([1.2, 3], requires_grad=True)
y = x.log()
print(y.grad_fn.__class__)  # <class 'LogBackward0'>
print(y.grad_fn._saved_self is x)  # True
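
_saved_exponent from the list at the top does not appear in the examples above; it shows up on pow. A minimal sketch (the exact type and repr of the saved exponent may vary across PyTorch versions):

x = torch.tensor([1.2, 3], requires_grad=True)
y = x.pow(2)
print(y.grad_fn.__class__)  # <class 'PowBackward0'>
print(y.grad_fn._saved_self is x)  # True
print(y.grad_fn._saved_exponent)   # 2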

Example 1: where is MulBackward0 defined?
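
MulBackward0 is not hand-written. The autograd node classes are generated at build time from the derivative formulas in tools/autograd/derivatives.yaml; the generated C++ ends up in torch/csrc/autograd/generated/Functions.h and Functions.cpp in the build tree, which is why grepping a source checkout for the class definition does not find it.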