TF2-03 Differentiation

2020-02-21  杨强AT南京

API overview

  1. Graph mode uses the tf.gradients function:
    tf.gradients(
        ys,
        xs,
        grad_ys=None,
        name='gradients',
        gate_gradients=False,
        aggregation_method=None,
        stop_gradients=None,
        unconnected_gradients=tf.UnconnectedGradients.NONE
    )
    tf.stop_gradient(
        input,
        name=None
    )
  2. Eager mode uses the GradientTape class
    • Provides a with-context (context manager) mode.
    • Provides the following member functions for gradient computation:
      1. gradient
      2. watch
      3. reset
      4. stop_recording: creates its own context; operations performed inside it are not tracked.
      5. jacobian

    __init__(
        persistent=False,
        watch_accessed_variables=True
    )
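
The watch_accessed_variables parameter above controls whether trainable Variables are watched automatically. A minimal sketch of its effect (an illustrative example, not from the original article): with watch_accessed_variables=False, even a tf.Variable must be watched explicitly, otherwise gradient() returns None.

gp = tf.GradientTape(watch_accessed_variables=False)
v = tf.Variable(3.0)
with gp:
    y = v * v                 # v is accessed but never watched

print(gp.gradient(y, v))      # None: automatic watching is disabled

gp2 = tf.GradientTape(watch_accessed_variables=False)
with gp2:
    gp2.watch(v)              # watch explicitly
    y = v * v

print(gp2.gradient(y, v))     # tf.Tensor(6.0, shape=(), dtype=float32)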

Examples of automatic differentiation

Graph-mode examples

import tensorflow as tf

Simple first-order derivative

import tensorflow as tf
g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0)
    y = x * x
    grad = tf.gradients(y, x)

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re)
[2.0]

Multivariate differentiation

g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0)
    y = tf.constant(2.0)
    z = x ** 2 + y ** 3
    grad = tf.gradients(z, [x, y])

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re)
[2.0, 12.0]

Aggregated differentiation

# If ys is a list, the sum of the gradients is computed; if xs is a list, each entry is a separate variable to differentiate with respect to, as above
g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y_1 = x * x
    y_2 = x ** 3
    grad = tf.gradients([y_1, y_2], x)

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re)
[16.0]
# Same as above, but this time specifying aggregation_method explicitly
g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y_1 = x * x
    y_2 = x ** 3
    grad = tf.gradients([y_1, y_2], x, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re)
[16.0]

grad_ys sets (holds) the initial gradients, i.e. a weight applied to each y before the per-y gradients are summed

g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y_1 = x * x
    y_2 = x ** 3
    grad = tf.gradients([y_1, y_2], x, grad_ys=[0.3, 0.7])

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re)   # 0.3 * (2 * 2) + 0.7 * (3 * 2 ** 2) = 1.2 + 8.4 = 9.6
[9.599999]

Higher-order gradient computation

g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y = x ** 4
    grad_1 = tf.gradients(y, x)
    grad_2 = tf.gradients(grad_1, x)

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad_2)
print(re)    # second derivative of x ** 4 is 12 * x ** 2 = 48 at x = 2
[48.0]

Chain-rule differentiation and the use of stop_gradients

g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y = x * x
    z = 5 * tf.sqrt(y)
    grad = tf.gradients(z, x)

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re) 
[5.0]
g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y = x * x
    z = 5 * tf.sqrt(y)
    grad = tf.gradients(z, x, stop_gradients=[y], unconnected_gradients=tf.UnconnectedGradients.ZERO)   # with UnconnectedGradients.NONE the result would be None

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re) 
[0.0]
g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y = x * x
    z = 5 * tf.sqrt(y)
    grad = tf.gradients(z, y, stop_gradients=[y])   # dz/dy = 5 / (2 * sqrt(y)) = 1.25 at y = 4
    # grad = tf.gradients(z, y)                     # same result: stop_gradients=[y] does not affect differentiation with respect to y itself

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re) 
[1.25]
g = tf.Graph()
with g.as_default():
    x = tf.constant(2.0)
    y = x * x
    y_ = tf.stop_gradient(y)   # returns a Tensor that is excluded from gradient tracking
    z = 5 * tf.sqrt(y_)
    grad = tf.gradients(z, x, unconnected_gradients=tf.UnconnectedGradients.ZERO)   # with NONE the result would be None
    # grad = tf.gradients(z, y_)   # NONE

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re) 
[0.0]

Using the gate_gradients parameter

gate_gradients=True adds a gate (a tuple) around the gradients computed for each op, so that all partial gradients are available before any of them is used; this avoids some race conditions during aggregation and does not change the value of the result.

import tensorflow as tf
g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0)
    y = x * x
    grad = tf.gradients(y, x, gate_gradients=True)

sess = tf.compat.v1.Session(graph=g)
re = sess.run(grad)
print(re)
[2.0]

Using differentiation to find a function's minimum and maximum

import tensorflow as tf

vals = []   # records the value of x at each iteration

g = tf.Graph()
sess = tf.compat.v1.Session(graph=g)
with g.as_default():
    x = tf.constant(0.1)
    rate = tf.constant(0.1)
    N = 40
    for i in range(N):
        y = 2 * ((x - 2) ** 2) + 3
        grad = tf.gradients(y, x)
        x -= rate * grad
        v = sess.run(x)
        x = tf.constant(v[0])
        vals.append(v[0])
        
print(F"------ 极值点:{v[0]}")   # 最后一次极值点
print(F"------ 极值:{(lambda x: 2 * ((x - 2) ** 2) + 3)(v[0])}")

# Visualize the approach to the extremum
%matplotlib inline
import matplotlib.pyplot as plt

plt.plot(range(N), vals)
plt.show()

------ extremum point: 1.9999998807907104
------ extremum value: 3.0000000000000284
(Figure: visualization of the derivative-based extremum search)
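
The heading above mentions both minima and maxima, but only a minimum is computed in the example. A minimal sketch of the maximum case (an illustrative example, not from the original article), assuming the concave function -(x - 2) ** 2 + 3: the only change is stepping along the gradient (ascent) instead of against it.

g = tf.Graph()
sess = tf.compat.v1.Session(graph=g)
with g.as_default():
    x = tf.constant(0.1)
    rate = tf.constant(0.1)
    N = 40
    for i in range(N):
        y = -((x - 2) ** 2) + 3        # concave function with its maximum at x = 2
        grad = tf.gradients(y, x)
        x += rate * grad               # gradient ascent: move along the gradient
        v = sess.run(x)
        x = tf.constant(v[0])

print(F"------ maximum point: {v[0]}")   # approaches 2.0, where the maximum value is 3.0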

Eager-mode examples

Programming pattern

# 1. Construct the tf.GradientTape object
gp = tf.GradientTape()
x = tf.constant(5.0)

# 2. Open the tf.GradientTape tracking context
with gp:
    # 3. Inside the context, define the function to differentiate and the variables to differentiate with respect to
    gp.watch(x)    # without this watch, the gradient is None
    y = x * x      # the function to differentiate

# 4. Call the GradientTape object's gradient method to perform the computation
grad = gp.gradient(y, x)
print(grad)

tf.Tensor(10.0, shape=(), dtype=float32)

Persistence of tf.GradientTape

gp = tf.GradientTape(persistent=True)   # note the difference between persistent=True and the default False
x = tf.constant(5.0)
with gp:
    gp.watch(x)   
    y = x * x  

grad = gp.gradient(y, x)
print(grad)
grad = gp.gradient(y, x)
print(grad)
tf.Tensor(10.0, shape=(), dtype=float32)
tf.Tensor(10.0, shape=(), dtype=float32)
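
A small usage note (drawn from the general TF2 documentation, not from the original article): a persistent tape holds its resources until the Python object is released, so it is common to drop the reference once all gradients have been computed.

del gp    # release the resources held by the persistent tape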

Automatic tracking of trainable Variables

gp = tf.GradientTape() 
x = tf.Variable(5.0)
with gp:
    y = x * x  

grad = gp.gradient(y, x)
print(grad)
tf.Tensor(10.0, shape=(), dtype=float32)

Multivariate differentiation

gp = tf.GradientTape(persistent=True)
x = tf.constant(5.0)
y = tf.constant(2.0)

with gp:
    gp.watch(x)  
    gp.watch(y)  
    z = x ** 2 + 2 * y

grad_x_y = gp.gradient(z, [x, y])
grad_x = gp.gradient(z, [x])
grad_y = gp.gradient(z, y)
print(grad_x_y)
print(grad_x)
print(grad_y)     # note the return types: a Tensor vs. a list
[<tf.Tensor: shape=(), dtype=float32, numpy=10.0>, <tf.Tensor: shape=(), dtype=float32, numpy=2.0>]
[<tf.Tensor: shape=(), dtype=float32, numpy=10.0>]
tf.Tensor(2.0, shape=(), dtype=float32)

Aggregated differentiation

gp = tf.GradientTape()
x = tf.constant(5.0)

with gp:
    gp.watch(x)  
    y_1 = x ** 2 
    y_2 = 2 * x

grad = gp.gradient([y_1, y_2], x)
print(grad)

tf.Tensor(12.0, shape=(), dtype=float32)

Stopping tracking inside a GradientTape context

@contextmanager
stop_recording()
gp = tf.GradientTape()
x = tf.constant(5.0)

with gp:
    gp.watch(x)  
    y_1 = x ** 2 
    with gp.stop_recording():    # stop recording
        y_2 = 2 * x

grad = gp.gradient([y_1, y_2], x)    # y_2 contributes 0: it was not tracked, so it is treated as 0 in the aggregation
print(grad)
tf.Tensor(10.0, shape=(), dtype=float32)

reset() resets the tape's tracking

gp = tf.GradientTape()
x = tf.constant(5.0)

with gp:
    gp.watch(x)  
    y_1 = x ** 2 
    gp.reset()
    gp.watch(x)        # x was cleared and must be watched again; y_1 has also been cleared
    y_2 = 2 * x

grad = gp.gradient([y_1, y_2], x)    # x and y_1 were cleared by reset(), so only y_2 contributes
print(grad)
tf.Tensor(2.0, shape=(), dtype=float32)

Derivatives of vectors and matrices

gp = tf.GradientTape(persistent=True)
x = tf.constant([5.0, 2.0])

with gp:
    gp.watch(x)  
    y = x ** 2

grad = gp.gradient(y, x)    # for a vector target, the per-component gradients are summed (here each y_i depends only on x_i)
print(grad)
jaco = gp.jacobian(y, x)
print(jaco)
tf.Tensor([10.  4.], shape=(2,), dtype=float32)
tf.Tensor(
[[10.  0.]
 [ 0.  4.]], shape=(2, 2), dtype=float32)

Inspecting automatically tracked Variables

gp = tf.GradientTape() 
x = tf.Variable(5.0)
with gp:
    y = x * x  

vars = gp.watched_variables()
print(vars)
(<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>,)

Using GradientTape to compute a minimum point

# A small wrapper function for computing the gradient
def cal_grad(dy, dx):
    gp = tf.GradientTape() 
    tx = dx
    with gp:
        ty = dy(tx)
    grad = gp.gradient(ty, tx)
    return grad

vals = []
N  = 50
rate = tf.constant(0.1)
f = lambda p : 2 * ((p - 2) ** 2) + 3
x = tf.Variable(0.1)

for i in range(N):
    delta = cal_grad(f, x)
    x.assign_sub(rate * delta)
    vals.append(x.numpy())

print(F"------ 极值点:{x.numpy()}")   # 最后一次极值点
print(F"------ 极值:{(lambda x: 2 * ((x - 2) ** 2) + 3)(x.numpy())}")

# Visualize the approach to the extremum
%matplotlib inline
import matplotlib.pyplot as plt

plt.plot(range(N), vals)
plt.show()

------ extremum point: 1.9999998807907104
------ extremum value: 3.0000000000000284
(Figure: visualization of the gradient-based extremum search)

Parameters of the gradient member function

  1. The output_gradients parameter
    • weighting coefficients (it plays the same role as grad_ys in the graph-mode description above)
  2. The unconnected_gradients parameter
    • how to handle sources that are disconnected from the target (see the sketch at the end of this section)
gradient(
    target,
    sources,
    output_gradients=None,
    unconnected_gradients=tf.UnconnectedGradients.NONE
)
gp = tf.GradientTape(persistent=True) 
x = tf.Variable(5.0)
with gp:
    y = x * x  
    z = 2 * y + tf.sqrt(y) 
    s = 3 * (x ** 2)

vars = gp.watched_variables()
print(vars)
a = [0.1, 0.9]

z_x = gp.gradient(z, x)
print(F"z_x = {z_x.numpy()}")
s_x = gp.gradient(s, x)
print(F"s_x = {s_x.numpy()}")

print(F"manual, weighted: a[0]*z_x + a[1]*s_x = {  a[0] * z_x.numpy() + a[1] * s_x.numpy()}")
print(F"manual, unweighted: z_x + s_x = {  z_x.numpy() + s_x.numpy()}")

grad = gp.gradient([z, s], x, output_gradients=a)
print(F"weighted: a[0]*z_x + a[1]*s_x = {grad.numpy()}")

grad = gp.gradient([z, s], x)
print(F"unweighted: z_x + s_x = {grad.numpy()}")


(<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>,)
z_x = 21.0
s_x = 30.0
manual, weighted: a[0]*z_x + a[1]*s_x = 29.1
manual, unweighted: z_x + s_x = 51.0
weighted: a[0]*z_x + a[1]*s_x = 29.09999656677246
unweighted: z_x + s_x = 51.0
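
The unconnected_gradients parameter listed above is not exercised in the example. A minimal sketch (an illustrative example, not from the original article): y does not depend on z, so the gradient is None by default and 0.0 with UnconnectedGradients.ZERO.

gp = tf.GradientTape(persistent=True)
x = tf.constant(3.0)
z = tf.constant(4.0)
with gp:
    gp.watch(x)
    gp.watch(z)
    y = x * x                 # y is not connected to z

print(gp.gradient(y, z))      # None
print(gp.gradient(y, z, unconnected_gradients=tf.UnconnectedGradients.ZERO))   # tf.Tensor(0.0, shape=(), dtype=float32)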

Higher-order derivatives

x = tf.Variable(4.0)

with tf.GradientTape() as g_2:         # outer tape: second-order derivative
    # g_2.watch(x)                     # not needed: x is a Variable, so it is watched automatically
    with tf.GradientTape() as g_1:     # inner tape: first-order derivative
        # g_1.watch(x)
        y = x ** 3
    dy_dx = g_1.gradient(y, x)     # first order

d2y_dx2 = g_2.gradient(dy_dx, x)  # second order
print(F"一阶导数{dy_dx}")
print(F"二阶导数{d2y_dx2}")
一阶导数48.0
二阶导数24.0
