What is this backpropagation optimization technique?
There is backpropagation code (Link).
Excerpt:
# np (numpy), x (the input matrix) and fault (the error tolerance) are presumably defined elsewhere in the linked code.
def backpropagation(y):
    w = np.zeros((x.shape[1], 1))
    iteration = 0
    while True:  # for iteration in range(1, 51):
        iteration += 1
        error_max = 0
        for i in range(x.shape[0]):
            error = y[i] - x[i].dot(w).sum()
            error_max = max(error, error_max)
            # print('Error', error_max, error)
            for j in range(x.shape[1]):
                delta = x[i][j] * error
                w[j] += delta
                # print('Δw{} = {}'.format(j, delta))
        print('№{}: {}'.format(iteration, error_max))
        if error_max < fault:
            break
    return w
In essence, the first version updates the weights like this:

    error = y - x.dot(weights).sum()
    weights += x * error
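For reference, here is a minimal, self-contained sketch of that first rule on made-up data; x, y and fault mirror the names in the snippet, but their values are assumptions for the demo.

import numpy as np

x = np.array([[0.5, 0.2], [0.2, 0.5], [0.4, 0.4]])  # toy inputs
y = np.array([0.7, 0.7, 0.8])                       # toy targets, consistent with w = [1, 1]
fault = 1e-3                                        # error tolerance

w = np.zeros(x.shape[1])
for iteration in range(1, 1001):
    error_max = 0.0
    for i in range(x.shape[0]):
        error = y[i] - x[i].dot(w)              # prediction error for sample i
        error_max = max(abs(error), error_max)  # abs() so a negative error also counts
        w += error * x[i]                       # per-sample correction, implicit step size of 1
    if error_max < fault:
        break

print(iteration, w)  # w approaches [1, 1]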
The second version:

# derivative is an external numerical-differentiation helper (not shown in the excerpt).
def backpropagation(y):
    w = np.zeros((x.shape[1], 1))
    iteration = 0
    def gradient(f, x):
        return derivative(f, x, 1e-6)
    while True:  # for iteration in range(1, 51):
        iteration += 1
        error_max = 0
        for i in range(x.shape[0]):
            f = lambda o: y[i] - x[i].dot(o).sum()
            error = f(w)
            # print(error)
            error_max = max(error, error_max)
            # print('Error', error_max, error)
            antigrad = -1 * gradient(f, w)
            # print('-∇ = {}'.format(antigrad))
            n = 2
            delta = error * antigrad * n
            for j in range(x.shape[1]):
                w[j] += delta * x[i][j]
                # print('Δw{} = {}'.format(j, delta))
        print('№{}: {}'.format(iteration, error_max))
        if error_max < fault:
            break
    return w
And the second version updates them like this:

    error = y - x.dot(weights).sum()
    delta = error * antigradient * n
    weights += delta * x
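For comparison, here is a minimal, self-contained sketch of the second idea: per sample, take a numerical gradient of the error function f with respect to the weights and step against it, scaled by the error and a learning rate n. The data, the central-difference helper (standing in for the derivative call, which is not shown in the excerpt) and the value of n are assumptions for the demo.

import numpy as np

x = np.array([[0.5, 0.2], [0.2, 0.5], [0.4, 0.4]])
y = np.array([0.7, 0.7, 0.8])
fault = 1e-3
n = 0.5  # learning rate ("n" in the snippet)

def numeric_grad(f, w, eps=1e-6):
    # Central-difference estimate of df/dw_j for every component j.
    g = np.zeros_like(w)
    for j in range(w.size):
        step = np.zeros_like(w)
        step[j] = eps
        g[j] = (f(w + step) - f(w - step)) / (2 * eps)
    return g

w = np.zeros(x.shape[1])
for iteration in range(1, 1001):
    error_max = 0.0
    for i in range(x.shape[0]):
        f = lambda o, i=i: y[i] - x[i].dot(o)  # per-sample error function
        error = f(w)
        error_max = max(abs(error), error_max)
        antigrad = -numeric_grad(f, w)         # for this f it equals x[i]
        w += n * error * antigrad              # step against the gradient
    if error_max < fault:
        break

print(iteration, w)  # w approaches [1, 1]

Since the antigradient of this f already carries the x[i] factor, the sketch does not multiply by x[i][j] a second time.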
The difference, as I understand it, is that in the second version the error is wrapped in a function f (from which a numerical gradient is taken) and there is also something like a learning rate (n). Both of these enter the calculation of delta.
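To illustrate the learning-rate point, here is the same per-sample update run with two different values of n on made-up data; n only scales each correction, which changes how quickly the loop reaches the tolerance.

import numpy as np

x = np.array([[0.5, 0.2], [0.2, 0.5], [0.4, 0.4]])
y = np.array([0.7, 0.7, 0.8])
fault = 1e-3

def train(n, max_epochs=2000):
    w = np.zeros(x.shape[1])
    for epoch in range(1, max_epochs + 1):
        error_max = 0.0
        for i in range(x.shape[0]):
            error = y[i] - x[i].dot(w)
            error_max = max(abs(error), error_max)
            w += n * error * x[i]  # n scales every correction step
        if error_max < fault:
            break
    return epoch, w

for n in (1.0, 0.1):
    epochs, w = train(n)
    print('n = {}: stopped after {} epochs, w = {}'.format(n, epochs, w))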