diff --git a/rgbAI/lib/func.py b/rgbAI/lib/func.py
index 4336679..7a7d9b9 100644
--- a/rgbAI/lib/func.py
+++ b/rgbAI/lib/func.py
@@ -24,11 +24,8 @@ class AIlib:
         return mat

     def think( inp:np.array, weights:list, bias:list, layerIndex: int=0 ): # recursive thinking, hehe
-        # the length of weights and bias should be the same
-        # if not then the neural net is flawed/incorrect
-        maxLayer = len(weights) - 1
-
         try:
+            maxLayer = len(weights) - 1
             weightedInput = np.dot( inp, weights[layerIndex] ) # dot multiply the input and the weights
             layer = AIlib.sigmoid( np.add(weightedInput, bias[layerIndex]) ) # add the biases

@@ -49,3 +46,7 @@ class AIlib:
             print( "Error: " + str(err) )
             print( "Layer index: " + str(layerIndex) )
             print( "Max layer index: " + str(maxLayer) )
+
+    def gradient( cost1:float, cost2:float, inp1:np.array, inp2:np.array ):
+        return (cost2 - cost1) / (inp2 - inp1)
+
diff --git a/rgbAI/main.py b/rgbAI/main.py
index 652ad7d..587adf5 100755
--- a/rgbAI/main.py
+++ b/rgbAI/main.py
@@ -48,7 +48,16 @@ def init(): # init
     res = bot.think( inpArr )

     cost = bot.calcError(inpArr, res)
-    print("Cost: " + str(cost))
+    inpArr2 = np.array([0.3, 0.5, 0.9])
+    res2 = bot.think(inpArr2)
+    cost2 = bot.calcError(inpArr2, res2)
+    print("Cost: " + str(cost2))
+    print("\n----")
+
+    print("Gradient\n")
+    gradient = ai.gradient( cost, cost2, inpArr, inpArr2 )
+    print(gradient)
+
 init()
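
Note on the new gradient helper: AIlib.gradient divides a scalar cost
delta by an elementwise input delta, so it returns an array of secant
slopes rather than a true gradient, and it divides by zero whenever a
component of inp1 equals the matching component of inp2. Below is a
minimal per-component finite-difference sketch, assuming a cost_fn that
maps an input vector to a scalar cost (as bot.calcError does via
bot.think in main.py); the names finite_difference_gradient, cost_fn,
and eps are illustrative, not from this repo:

    import numpy as np

    def finite_difference_gradient(cost_fn, inp: np.ndarray, eps: float = 1e-6) -> np.ndarray:
        # Perturb one input component at a time and measure the change in cost.
        base = cost_fn(inp)
        grad = np.zeros_like(inp, dtype=float)
        for i in range(inp.size):
            bumped = inp.copy()
            bumped[i] += eps                          # nudge a single component
            grad[i] = (cost_fn(bumped) - base) / eps  # forward difference
        return grad

Usage would mirror main.py, e.g.
finite_difference_gradient(lambda x: bot.calcError(x, bot.think(x)), inpArr).
Perturbing one component at a time avoids the zero-division case and
attributes each slice of the cost change to the input that caused it,
instead of comparing two points that differ in every component at once.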