Automatic differentiation (autograd) is one of PyTorch's most important features: it frees us from computing derivatives by hand and saves time when building models. This section walks through the basic usage of the autograd mechanism.
# Automatic differentiation (autograd)
import torch
from torch.autograd import Variable

# 1. Simple differentiation (the target is a scalar)
x = Variable(torch.Tensor([2]), requires_grad=True)
y = (x + 2) ** 2 + 3
print(y)
y.backward()   # compute dy/dx and store it in x.grad
print(x.grad)

# Differentiating with respect to matrices
x1 = Variable(torch.randn(10, 20), requires_grad=True)
y1 = Variable(torch.randn(10, 1), requires_grad=True)
W = Variable(torch.randn(20, 1), requires_grad=True)
J = torch.mean(y1 - torch.matmul(x1, W))  # matmul performs matrix multiplication
J.backward()
print(x1.grad)
print(y1.grad)
print(W.grad)
Output:
tensor([19.], grad_fn=<AddBackward0>)   # print(y): (2 + 2)^2 + 3 = 19
tensor([8.])                            # print(x.grad): dy/dx = 2 * (x + 2) = 8 at x = 2

x1.grad is a 10 x 20 tensor whose rows are all identical, because ∂J/∂x1[i, j] = -W[j] / 10 does not depend on the row index i:
tensor([[-0.1636,  0.0904,  0.0446, -0.1052, -0.2323,  0.0129, -0.1532,  0.0544,
          0.0231, -0.0993, -0.0387, -0.1762,  0.0477,  0.1552,  0.0493,  0.0144,
         -0.1581,  0.1986, -0.0226, -0.0454],
        ... (the remaining nine rows are identical) ...])

y1.grad is a 10 x 1 tensor filled with 0.1, since each entry of y1 enters the mean with weight 1/10:
tensor([[0.1000], [0.1000], [0.1000], [0.1000], [0.1000],
        [0.1000], [0.1000], [0.1000], [0.1000], [0.1000]])

W.grad is a 20 x 1 tensor (the negated column means of x1):
tensor([[ 0.0224], [ 0.0187], [-0.2078], [ 0.5092], [ 0.0677],
        [ 0.3497], [-0.4575], [-0.5480], [ 0.4228], [-0.0869],
        [ 0.2876], [-0.1714], [ 0.0985], [-0.1364], [-0.1502],
        [-0.1372], [-0.0999], [-0.0006], [-0.0544], [-0.0678]])
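Note that Variable comes from the pre-0.4 PyTorch API; since version 0.4 it has been merged into Tensor, so plain tensors with requires_grad=True are enough. A minimal sketch of the first example in the modern style:

import torch

# requires_grad=True asks autograd to record every operation on x
x = torch.tensor([2.0], requires_grad=True)
y = (x + 2) ** 2 + 3
y.backward()       # dy/dx = 2 * (x + 2) = 8 at x = 2
print(x.grad)      # tensor([8.])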
# Autograd in more complex situations: repeated backward passes and multi-dimensional arrays
import torch
from torch.autograd import Variable

x = Variable(torch.FloatTensor([3]), requires_grad=True)
y = x ** 2 + x * 2 + 3
y.backward(retain_graph=True)  # keep the computation graph so backward can be called again
print(x.grad)
y.backward()  # this time the graph is not retained and is freed afterwards
print(x.grad)  # gradients accumulate: the first backward's 8 plus the second's 8
Output:
tensor([8.])
tensor([16.])
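This accumulation is exactly why training loops call zero_grad() before every step. A minimal sketch showing how zeroing the gradient in place keeps the two backward passes independent (same function as above):

import torch

x = torch.tensor([3.0], requires_grad=True)
y = x ** 2 + x * 2 + 3
y.backward(retain_graph=True)
print(x.grad)      # tensor([8.])
x.grad.zero_()     # reset the accumulated gradient in place
y.backward()
print(x.grad)      # tensor([8.]) again instead of tensor([16.])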
# Exercise: differentiating a vector with respect to a vector (building the Jacobian)
import torch
from torch.autograd import Variable

x = Variable(torch.Tensor([2, 3]), requires_grad=True)
k = Variable(torch.zeros_like(x))
k[0] = x[0] ** 2 + 3 * x[1]
k[1] = 2 * x[0] + x[1] ** 2
print(k)
j = torch.zeros(2, 2)
# backward(v) computes J^T v, so passing the unit vectors recovers the Jacobian row by row
k.backward(torch.FloatTensor([1, 0]), retain_graph=True)
j[0] = x.grad.data
x.grad.zero_()  # clear x.grad before the next backward pass so the rows do not accumulate
k.backward(torch.FloatTensor([0, 1]), retain_graph=True)
j[1] = x.grad.data
print(j)
Output:
tensor([13., 13.], grad_fn=<CopySlices>)
tensor([[4., 3.],
        [2., 6.]])
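For reference, recent PyTorch versions (1.5 and later) can build the full Jacobian in one call with torch.autograd.functional.jacobian, instead of running one backward pass per output component. A sketch of the same exercise:

import torch
from torch.autograd.functional import jacobian

def k(x):
    # k0 = x0^2 + 3 * x1 and k1 = 2 * x0 + x1^2, as above
    return torch.stack([x[0] ** 2 + 3 * x[1], 2 * x[0] + x[1] ** 2])

x = torch.tensor([2.0, 3.0])
print(jacobian(k, x))  # tensor([[4., 3.], [2., 6.]])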