A
A
Agamendon2020-04-29 16:47:53
Python
Agamendon, 2020-04-29 16:47:53

How to set up a neural network correctly?

I decided to write an XOR neural network following the example from here:
As a result, the network correctly calculates the answer, but with back propagation, not everything is so smooth. When testing with the same input values, when entering an example from the article, the miscalculation is normal, when you enter 1, 1, the answer value first falls to zero, then tends to 0.5. With random numbers per input, as a rule at some point the answer value drops below 0.3 and in most cases is identical for any input.
I would be very grateful for any help.
Neuron classes:

import math
from classes import *


class Neuron:

    def __init__(self, previousLayer, selfValue, weightsToNextLayer):
        self.selfValue = selfValue
        self.previousLayer = previousLayer
        self.weightsToNextLayer = weightsToNextLayer
        self.numberInLayer = None
        self.sumFromNextLayer = 0
        self.nextLayer = None
        self.network = None
        self.previousIterations = None
        if self.weightsToNextLayer:
            self.previousIterations = [0 for x in range(len(self.weightsToNextLayer))]

    def counterVectors(self):
        counter = 0
        if self.previousLayer:
            for x in range(self.previousLayer.getLengthOfLayer()):
                counter+= self.previousLayer.getNeuronValueByNumber(x) * self.previousLayer.getNeuronWeightByNumber(x, self.numberInLayer)
        return counter

    def sigmoid(self):
        vectorsResult = self.counterVectors()
        return 1 / (1 + math.exp(-vectorsResult))

    def selfValueDefine(self):
        self.selfValue = self.sigmoid()

    def getSelfValue(self):
        return self.selfValue

    def setNumberInLayer(self, number):
        self.numberInLayer = number

    def functionIn(self):
        return (1 - self.selfValue) * self.selfValue

    def gradient(self, number):
        return self.selfValue * self.nextLayer.neuronsInLayer[number].delta()

    def deltaWeight(self, number):
        return self.network.epsilon * self.gradient(number) + self.network.alpha * self.previousIterations[number]

    def changeWeights(self):
        for x in range(len(self.weightsToNextLayer)):
            self.weightsToNextLayer[x] += self.deltaWeight(x)
            self.previousIterations[x] = self.deltaWeight(x)

    def backpropagation(self):
        self.changeWeights()

    def sumToPreviousLayer(self):
        if self.previousLayer:
            for x in self.previousLayer:
                x.sumFromNextLayer+= self.delta() * x.weightsToNextLayer[self.numberInLayer]


class HiddenNeuron(Neuron):

    def delta(self):
        sum = 0
        for x in range(len(self.weightsToNextLayer)):
            sum += self.weightsToNextLayer[x] * self.nextLayer.getNeuronValueByNumber(x)
        return sum * self.functionIn()


class OutputNeuron(Neuron):

    def __init__(self, previousLayer, selfValue):
        super().__init__(previousLayer, selfValue, None)
        self.idealResult = self.selfValue
        self.selfValue = 0

    def delta(self):
        return (self.idealResult - self.selfValue) * self.functionIn()

    def setIdeal(self, ideal):
        self.idealResult = ideal


class InputNeuron(Neuron):

    def __init__(self, selfValue, weightsToNextLayer):
        super().__init__(None, selfValue, weightsToNextLayer)

    def selfValueDefine(self):
        return self.selfValue

    def getSelfValue(self):
        return self.selfValue


class BiasNeuron(Neuron):

    def __init__(self, weightsToNextLayer):
        super().__init__(None, 1, weightsToNextLayer)

Layer and network classes:
import math
from neuron_classes import *


class Layer:

    def __init__(self, neuronsInLayer):
        self.neuronsInLayer = neuronsInLayer
        self.nextLayer = None
        self.network = None

    def initNeurons(self):
        for x in self.neuronsInLayer:
            x.nextLayer = self.nextLayer
            x.network = self.network



    def getNeuronValueByNumber(self, neuronNumber):
        return self.neuronsInLayer[neuronNumber].selfValue

    def getNeuronWeightByNumber(self, neuronNumber, weightNumber):
        return self.neuronsInLayer[neuronNumber].weightsToNextLayer[weightNumber]

    def getLengthOfLayer(self):
        return len(self.neuronsInLayer)

    def calculateNeurons(self):
        counter = 0
        for x in self.neuronsInLayer:
            x.setNumberInLayer(counter)
            x.selfValueDefine()
            counter+= 1

    def layerToString(self):
        convertedLayer = ''
        for x in self.neuronsInLayer:
            convertedLayer += str(x.getSelfValue)
        print(convertedLayer)
        return convertedLayer

    def backpropagation(self):
        for x in self.neuronsInLayer:
            x.backpropagation()



class Network:

    def __init__(self, layers, epsilon, alpha):
        self.layers = layers
        self.epsilon = epsilon
        self.alpha = alpha
        for x in range(len(layers) - 1):
            layers[x].network = self
            layers[x].nextLayer = layers[x + 1]
            layers[x].initNeurons()
        for x in range(len(layers) - 1):
            layers[x].nextLayer = layers[x + 1]
        self.forwardCounts = 0

    def forwardPass(self):
        for x in self.layers:
            x.calculateNeurons()
        self.forwardCounts+= 1
        n = self.layers[len(self.layers) - 1]
        return [self.layers[len(self.layers) - 1].getNeuronValueByNumber(x) for x in range(self.layers[len(self.layers) - 1].getLengthOfLayer())]

    def backpropagation(self):
        x = len(self.layers) - 1
        while x > 0:
            x -= 1
            self.layers[x].backpropagation()

    def setOutputIdealValues(self, outputIdealValues):
        counter = 0
        for x in self.layers[len(self.layers) - 1].neuronsInLayer:
            x.idealValue = outputIdealValues[counter]
            counter+= 1

    def setInputs(self, inputs):
        counter = 0
        for x in self.layers[0].neuronsInLayer:
            if type(x) != BiasNeuron:
                x.selfValue = inputs[counter]
                counter+= 1

    def checkNetwork(self):
        for y in range(len(self.layers)):
            string = ''
            for x in range(len(self.layers[y].neuronsInLayer)):
                string += str(self.layers[y].neuronsInLayer[x].getSelfValue())
                string += ' '
            print(string)


Convenient network access class:
from classes import *
from neuron_classes import *


class PackagedNetwork:

    def __init__(self, inputNeuronsNumber, layersNumber, neuronsInLayer, outputNeuronsNumber, epsilon, alpha):
        self.inputNeuronsNumber = inputNeuronsNumber
        self.layersNumber = layersNumber
        self.neuronsInLayer = neuronsInLayer
        self.outputNeuronsNumber = outputNeuronsNumber
        self.epsilon = epsilon
        self.alpha = alpha
        self.layers = [None for x in range(self.layersNumber)]
        self.network = None
        for y in range(layersNumber):
            currentLayer = []
            # input
            if y == 0:
                for x in range(inputNeuronsNumber):
                    currentLayer.append(InputNeuron(0, [1 * (i + 1) for i in range(neuronsInLayer)]))

            # output
            elif y == layersNumber - 1:
                for x in range(outputNeuronsNumber):
                    currentLayer.append(OutputNeuron(self.layers[y - 1], 0))
            # hidden
            elif y == layersNumber - 2:
                for x in range(neuronsInLayer):
                    currentLayer.append(HiddenNeuron(self.layers[y - 1], None, [1 * (i + 1) for i in range(self.outputNeuronsNumber)]))

            else:
                for x in range(neuronsInLayer):
                    currentLayer.append(HiddenNeuron(self.layers[y - 1], None, [1 * (i + 1) for i in range(self.neuronsInLayer)]))

            layer = Layer(currentLayer)
            self.layers[y] = layer
        self.network = Network(self.layers, self.epsilon, self.alpha)

    def forwardPass(self):
        return self.network.forwardPass()

    def backpropagation(self):
        return self.network.backpropagation()

    def setOutputIdealValues(self, outputIdealValues):
        self.network.setOutputIdealValues(outputIdealValues)

    def setInputs(self, inputs):
        self.network.setInputs(inputs)

    def checkNetwork(self):
        self.network.checkNetwork()

Test 1
from classes import *
from neuron_classes import *
import random

inputLayer = Layer([InputNeuron(1, [0.45, 0.78]), InputNeuron(0, [-0.12, 0.13])])
hiddenLayer = Layer([HiddenNeuron(inputLayer, None, [1.5]), HiddenNeuron(inputLayer, None, [-2.3])])
outputLayer = Layer([OutputNeuron(hiddenLayer, 1)])

network = Network([inputLayer, hiddenLayer, outputLayer], 0.7, 0.8)
for x in range(20):
    print(network.forwardPass())
    network.backpropagation()

Test 2:
from package_classes import *
from neuron_classes import *
from classes import *
import random

network = PackagedNetwork(2, 3, 2, 1, 0.7, 0.3)

print('xor')
input1 = 0
input2 = 0
idealOutput = 0
for x in range(200):
    input1 = random.randint(0, 1)
    input2 = random.randint(0, 1)
    idealOutput = int(input1 != input2)
    network.setInputs([input1, input2])
    network.setOutputIdealValues([idealOutput])
    network.checkNetwork()
    print(network.forwardPass(), input1, input2, idealOutput)
    print()
    network.backpropagation()

Answer the question

In order to leave comments, you need to log in

Didn't find what you were looking for?

Ask your question

Ask a Question

731 491 924 answers to any question