使用Numpy编写层

2021-07-21  本文已影响0人  small瓜瓜
import random

import numpy as np


# coding: utf-8
class MulLayer:
    """Multiplication node of a computational graph: out = x * y."""

    def __init__(self):
        # Forward inputs are cached here; backward needs them for the gradients.
        self.x = None
        self.y = None

    def forward(self, x, y):
        """Multiply the two inputs, remembering them for backprop."""
        self.x, self.y = x, y
        return x * y

    def backward(self, dout):
        """Return (dx, dy); each input's gradient is dout times the other input."""
        return dout * self.y, dout * self.x


class AddLayer:
    """Addition node of a computational graph: out = x + y."""

    def __init__(self):
        # Addition needs no cached state for backprop.
        pass

    def forward(self, x, y):
        """Return the sum of the two inputs."""
        return x + y

    def backward(self, dout):
        """Both inputs receive the upstream gradient unchanged."""
        dx = dout * 1
        dy = dout * 1
        return dx, dy


class PowerLayer:
    """Power node of a computational graph: out = x ** power, for a fixed exponent."""

    def __init__(self, power):
        # Cached forward input, required by backward.
        self.x = None
        # Fixed exponent applied in forward.  (Removed a redundant trailing
        # `pass`: a statement after assignments is dead code.)
        self.power = power

    def forward(self, x):
        """Raise x to the configured power and cache x for backprop."""
        self.x = x
        return x ** self.power

    def backward(self, dout):
        """Chain rule for x**p: d/dx = p * x**(p-1), scaled by upstream dout."""
        return dout * self.power * self.x ** (self.power - 1)


def get_data(count):
    """Generate `count` samples of the linear target y = 5*x1 + 6*x2 + 10.1.

    Each input pair is drawn uniformly from [-10, 10).  Returns a tuple
    (x, t) of numpy arrays with shapes (count, 2) and (count,).
    """
    samples = []
    targets = []
    for _ in range(count):
        pair = [random.random() * 20 - 10, random.random() * 20 - 10]
        samples.append(pair)
        targets.append(5 * pair[0] + 6 * pair[1] + 10.1)

    return np.array(samples), np.array(targets)


epoch = 10
train_size = 1000
test_size = 100
learning_rate = 1e-2

# Training/test data for the linear target y = 5*x1 + 6*x2 + 10.1.
x_train, t_train = get_data(train_size)
x_test, t_test = get_data(test_size)

# Randomly initialised parameters of the model w1*x1 + w2*x2 + b.
w1, w2, b = np.random.randn(3)
mul1 = MulLayer()
mul2 = MulLayer()
add1 = AddLayer()
add2 = AddLayer()
add3 = AddLayer()
pow1 = PowerLayer(2)

for _ in range(epoch):
    for i in range(train_size):
        x1, x2 = x_train[i]
        t = t_train[i]

        # forward: loss = (w1*x1 + w2*x2 + b - t) ** 2
        z1 = mul1.forward(x1, w1)
        z2 = mul2.forward(x2, w2)
        z3 = add1.forward(z1, z2)
        z4 = add2.forward(z3, b)
        z5 = add3.forward(z4, -t)
        z6 = pow1.forward(z5)
        print(f'循环:{i} , loss:{z6}')

        # backward: propagate dloss = 1 through the layers instead of
        # hand-deriving each gradient inline (the original computed
        # 2*x1*z5 etc. by hand, leaving the layers' backward methods
        # unused and the gradients free to drift out of sync with the
        # forward graph).  Values are numerically identical.
        dz5 = pow1.backward(1)        # = 2 * z5
        dz4, _ = add3.backward(dz5)   # gradient w.r.t. -t is discarded
        dz3, db = add2.backward(dz4)
        dz1, dz2 = add1.backward(dz3)
        _, dw1 = mul1.backward(dz1)   # (dx, dw): weight grad is the 2nd item
        _, dw2 = mul2.backward(dz2)

        # Plain SGD update.
        w1 -= learning_rate * dw1
        w2 -= learning_rate * dw2
        b -= learning_rate * db

上一篇 下一篇

猜你喜欢

热点阅读