From ba5662b3718f31a7d51bf0744924193d6e442865 Mon Sep 17 00:00:00 2001
From: "E. Almqvist"
Date: Fri, 28 Aug 2020 20:55:36 +0200
Subject: [PATCH] Added recursive layer calculation function

---
 lib/func.py | 37 +++++++++++++++++++++++++++++++++++++++++++----
 main.py     | 27 ++++++++++++++++-----------
 2 files changed, 49 insertions(+), 15 deletions(-)

diff --git a/lib/func.py b/lib/func.py
index 4e3f7e0..59d4d57 100644
--- a/lib/func.py
+++ b/lib/func.py
@@ -4,8 +4,37 @@ class AIlib:
     def sigmoid(x):
         return 1/(1 + np.exp(-x))
 
-    def correctFunc(rgb): # generates the correct answer for the AI
-        return ( rgb[2], rgb[1], rgb[0] ) # basically invert the rgb values
+    def correctFunc(inp:np.array): # generates the correct answer for the AI
+        return np.array( [inp[2], inp[1], inp[0]] ) # basically reverse the rgb values
 
-    def genRandomMatrix( x:int, y:int ): # generate a matrix with x, y dimensions with random values from 0-1 in it
-        return np.random.rand(x, y)
+    def calcCost( inp:np.array, out:np.array ): # cost function, lower -> good, higher -> bad, bad bot, bad
+        sumC = 0
+        outLen = len(out)
+
+        correctOut = AIlib.correctFunc(inp) # the "correct" output
+
+        for i in range(outLen):
+            sumC += (out[i] - correctOut[i])**2 # square the difference of every value
+
+        return sumC / outLen # return the average cost over all outputs
+
+    def genRandomMatrix( x:int, y:int, min: float=0.0, max: float=1.0 ): # generate a matrix with x, y dimensions with random values from min-max in it
+        return np.random.uniform( min, max, (x, y) )
+
+    def think( inp:np.matrix, weights:list, bias:list, layerIndex: int=0 ): # recursive thinking, hehe
+        # the lengths of weights and bias should be equal;
+        # if not, then the neural net is flawed/incorrect
+        maxLayer = len(weights)
+        biasLen = len(bias)
+        if( maxLayer != biasLen ):
+            print("Neural Network Error: Lengths of weights and bias are not equal.")
+            print(f"Weights: {maxLayer}, Bias: {biasLen}")
+            exit()
+
+        weightedInput = np.dot( inp, weights[layerIndex] ) # multiply the input row by the weights
+        layer = np.add( weightedInput, bias[layerIndex] ) # add the biases
+
+        if( layerIndex >= maxLayer - 1 ): # last layer reached, stop recursing
+            return layer
+        else:
+            return AIlib.think( layer, weights, bias, layerIndex + 1 )
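
Note on the new AIlib.think(): it is a plain recursive forward pass. Each step
computes layer = inp · weights[i] + bias[i] and recurses until the last weight
matrix has been used. A minimal standalone sketch of the same idea, outside the
patch (numpy only; the forward() name and the random test values here are
illustrative, not part of this commit):

    import numpy as np

    def forward(inp, weights, biases, layer=0):
        out = np.dot(inp, weights[layer]) + biases[layer]  # one layer: input times weights, plus bias
        if layer >= len(weights) - 1:  # last weight matrix used -> stop recursing
            return out
        return forward(out, weights, biases, layer + 1)

    # same layer shapes as main.py sets up: 3 -> 4 -> 4 -> 3
    weights = [np.random.rand(3, 4), np.random.rand(4, 4), np.random.rand(4, 3)]
    biases = [np.random.rand(1, 4), np.random.rand(1, 4), np.random.rand(1, 3)]
    print(forward(np.array([[0.2, 0.4, 0.8]]), weights, biases))  # prints a 1x3 output row

The recursion terminates because layerIndex grows by one per call and the list
of weight matrices is finite.
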
diff --git a/main.py b/main.py
index 379363c..d39392e 100755
--- a/main.py
+++ b/main.py
@@ -1,29 +1,34 @@
 #!/usr/bin/env python
-
+import numpy as np
 from lib.func import AIlib as ai
 
 class rgb(object):
-    def __init__(self, loadedWeights = None, loadedBias = None):
+    def __init__(self, loadedWeights: np.matrix=None, loadedBias: np.matrix=None):
 
-        if( not loadedWeights or not loadedBias ):
+        if( loadedWeights is None or loadedBias is None ): # if either is None then just generate new ones
+            print("Generating weights and biases...")
             self.weights = [ ai.genRandomMatrix(3, 4), ai.genRandomMatrix(4, 4), ai.genRandomMatrix(4, 3) ] # array of matrices of weights
             # 3 input neurons -> 4 hidden neurons -> 4 hidden neurons -> 3 output neurons
 
-            # Will be needing biases too
+            # Generate the biases
             self.bias = [ ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 3) ]
             # This doesn't look very good, but it works so...
-        # This is all we need
-            print("Loading neural net...")
+        else: # if we want to load our progress from before then this would do it
             self.weights = loadedWeights
             self.bias = loadedBias
 
-    def think(self, inputMatrix):
-        print(self.weights)
-        print(self.bias)
+    def learn(self):
+        print("learn")
+
+    def think(self, inp:np.array):
+        res = ai.think( np.asmatrix(inp), self.weights, self.bias )
+        print(res)
+        # print(self.weights)
+        # print(self.bias)
+
 
 def init(): # init func
     bot = rgb()
-
-    bot.think(1)
+    bot.think( np.array([0.2, 0.4, 0.8]) )
 
 init()
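
For reference, correctFunc() just reverses the channel order of the input, and
calcCost() is the mean squared difference between the network's output and that
reversed target. A quick sketch of the same computation with made-up numbers
(not part of the patch):

    import numpy as np

    inp = np.array([0.2, 0.4, 0.8])       # input colour
    out = np.array([0.7, 0.5, 0.3])       # pretend network output
    target = inp[::-1]                    # reversed rgb, what correctFunc() returns
    cost = np.mean((out - target) ** 2)   # average squared difference per channel
    print(cost)                           # ~0.01 here; 0.0 would be a perfect answer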