From 26d8e06230f0f0bb24a51bd60ffe2869c06175a0 Mon Sep 17 00:00:00 2001
From: "E. Almqvist"
Date: Sun, 13 Sep 2020 21:14:39 +0200
Subject: [PATCH] Removed unneeded code

---
 rgbAI/lib/func.py | 61 +++++++++++++++--------------------------------
 rgbAI/main.py     |  2 ++
 2 files changed, 21 insertions(+), 42 deletions(-)

diff --git a/rgbAI/lib/func.py b/rgbAI/lib/func.py
index 8e3bc90..892a99a 100644
--- a/rgbAI/lib/func.py
+++ b/rgbAI/lib/func.py
@@ -24,44 +24,21 @@ class AIlib:
         return mat
 
     def think( inp:np.array, weights:list, bias:list, layerIndex: int=0 ): # recursive thinking, hehe
-        try:
-            maxLayer = len(weights) - 1
-            weightedInput = np.dot( inp, weights[layerIndex] ) # dot multiply the input and the weights
-            layer = AIlib.sigmoid( np.add(weightedInput, bias[layerIndex]) ) # add the biases
-
-            if( layerIndex < maxLayer ):
-                print(weights[layerIndex])
-                print("\n")
-                print("Layer " + str(layerIndex))
-                print(layer)
-                print("\n")
-
-            if( layerIndex < maxLayer ):
-                return AIlib.think( layer, weights, bias, layerIndex + 1 )
-            else:
-                out = np.squeeze(np.asarray(layer))
-                print("-Result-")
-                print(out)
-                print("\n")
-                return out
-
-        except (ValueError, IndexError) as err:
-            print("\n---------")
-            print( "Error: " + str(err) )
-            print( "Layer index: " + str(layerIndex) )
-            print( "Max layer index: " + str(maxLayer) )
-
-    def gradient( dCost:float, prop:list ):
-        propLen = len(prop)
-        gradient = [None] * propLen
-        for i in range( propLen - 1, -1, -1 ):
-            # if( i == propLen - 1 ):
-            #     gradient[i] = dCost / prop[i]
-            # else:
-            #     gradient[i] = dCost / (prop[i] + gradient[i+1])
-            gradient[i] = dCost / prop[i]
-
-        return gradient
+        maxLayer = len(weights) - 1
+        weightedInput = np.dot( inp, weights[layerIndex] ) # dot multiply the input and the weights
+        layer = AIlib.sigmoid( np.add(weightedInput, bias[layerIndex]) ) # add the biases
+
+        if( layerIndex < maxLayer ):
+            return AIlib.think( layer, weights, bias, layerIndex + 1 )
+        else:
+            out = np.squeeze(np.asarray(layer))
+            print("-Result-")
+            print(out)
+            print("\n")
+            return out
+
+    def gradient( dCost:float, dx:float, prop:list ):
+        # Calculate the gradient
 
     def mutateProp( prop:list, gradient:list ):
         newProp = [None] * len(gradient)
@@ -85,10 +62,10 @@ class AIlib:
 
         dCost = cost2 - cost1 # get the difference
 
-        weightDer = AIlib.gradient( dCost, obj.weights )
-        biasDer = AIlib.gradient( dCost, obj.bias )
+        weightDer = AIlib.gradient( dCost, theta, obj.weights )
+        biasDer = AIlib.gradient( dCost, theta, obj.bias )
 
-        obj.weights = AIlib.mutateProp( obj.weights, weightDer )
-        obj.bias = AIlib.mutateProp( obj.bias, biasDer )
+        #obj.weights = AIlib.mutateProp( obj.weights, weightDer )
+        #obj.bias = AIlib.mutateProp( obj.bias, biasDer )
 
         print("Cost: ", cost1)
 
diff --git a/rgbAI/main.py b/rgbAI/main.py
index a4dcdbd..c2616b7 100755
--- a/rgbAI/main.py
+++ b/rgbAI/main.py
@@ -16,6 +16,8 @@ class rgb(object):
 
         self.generation = 0
 
+        self.learningrate = 0.01 # the learning rate of this ai
+
         print( self.weights )
         print( self.bias )
 
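
Note on the stubbed gradient: the patch leaves AIlib.gradient() with an empty body and comments out the mutateProp() calls, so learn() currently computes dCost without applying it. The standalone sketch below shows one way the finite-difference pieces could fit together, assuming dx is the perturbation theta used to produce cost2 and that the new learningrate field in main.py is meant to scale the update step. The function names mirror the stubbed signatures, but the toy values and the standalone form are illustrative assumptions, not the repository's actual implementation.

    # Hypothetical sketch only -- mirrors gradient(dCost, dx, prop) from the
    # stub and the commented-out mutateProp() calls; not the repo's code.
    import numpy as np

    def gradient(dCost: float, dx: float, prop: list) -> list:
        # Finite-difference slope dCost/dx, broadcast to each layer's shape
        # so it can later be applied elementwise to the weights/biases.
        slope = dCost / dx
        return [np.full_like(np.asarray(layer, dtype=float), slope) for layer in prop]

    def mutateProp(prop: list, grad: list, learningrate: float = 0.01) -> list:
        # Gradient-descent style step: move each parameter against the slope,
        # scaled by a learning rate like the one introduced in main.py.
        return [np.asarray(layer, dtype=float) - learningrate * g
                for layer, g in zip(prop, grad)]

    if __name__ == "__main__":
        weights = [np.random.rand(3, 4), np.random.rand(4, 1)]  # toy layer shapes
        dCost, theta = 0.05, 0.001  # hypothetical values from one learn() pass
        grad = gradient(dCost, theta, weights)
        weights = mutateProp(weights, grad, learningrate=0.01)
        print([w.shape for w in weights])

Broadcasting a single scalar slope to every parameter is the crudest possible estimator; perturbing each layer separately (one extra cost evaluation per layer) would give per-layer derivatives at the price of more forward passes.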