diff --git a/rgbAI/lib/func.py b/rgbAI/lib/func.py
index 018aba8..038c68e 100644
--- a/rgbAI/lib/func.py
+++ b/rgbAI/lib/func.py
@@ -80,15 +80,12 @@ class AIlib:
         if( newLayer <= maxLayer ):
             return AIlib.gradient( inp, obj, theta, maxLayer, newLayer, grads, obj1, obj2 )
         else:
-            return grads
+            return grads, cost2
 
-    def mutateProp( prop:list, lr:float, gradient ):
-        newProp = [None] * len(prop)
-
-        for i in range(len(prop)):
-            newProp[i] = prop[i] - (lr*gradient)
-
-        return newProp
+    def mutateProps( obj, maxLen:int, gradient:list ):
+        for i in range(maxLen):
+            obj.weights[i] -= obj.learningrate * gradient[i]["weight"] # mutate the weights
+            obj.bias[i] -= obj.learningrate * gradient[i]["bias"]
 
     def learn( inp:np.array, obj, theta:float ):
         # Calculate the derivative for:
@@ -97,7 +94,8 @@ class AIlib:
         # i.e. : W' = W - lr * gradient (respect to W in layer i) = W - lr*[ dC / dW[i] ... ]
         # So if we change all the weights with i.e. 0.01 = theta, then we can derive the gradient with math and stuff
 
-        grads = AIlib.gradient( inp, obj, theta, len(obj.bias) - 1 )
-        print("####\n\n\n\n")
-        print(grads)
+        maxLen = len(obj.bias)
+        grads, cost = AIlib.gradient( inp, obj, theta, maxLen - 1 )
+        AIlib.mutateProps( obj, maxLen, grads ) # mutate the props for next round
+        print("Cost:", cost)
 
diff --git a/rgbAI/main.py b/rgbAI/main.py
index 1adc53a..d4d06b2 100755
--- a/rgbAI/main.py
+++ b/rgbAI/main.py
@@ -55,7 +55,7 @@ class rgb(object):
 
 def init():
     bot = rgb()
-    bot.traintimes = 1
+    bot.traintimes = 10
 
     bot.train()
 
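
For reference, a minimal sketch of the update rule the new mutateProps applies: plain gradient descent, W' = W - lr * dC/dW and b' = b - lr * dC/db, applied layer by layer. The Net class below is hypothetical scaffolding invented for this example; only the weights/bias/learningrate attribute names and the per-layer {"weight": ..., "bias": ...} gradient shape come from the diff.

import numpy as np

class Net:  # stand-in object for illustration; not the repo's actual class
    def __init__(self, sizes, learningrate=0.01):
        self.learningrate = learningrate
        # one weight matrix and one bias row per layer transition
        self.weights = [np.random.rand(a, b) for a, b in zip(sizes, sizes[1:])]
        self.bias = [np.random.rand(1, b) for b in sizes[1:]]

def mutateProps(obj, maxLen, gradient):
    for i in range(maxLen):
        obj.weights[i] -= obj.learningrate * gradient[i]["weight"]  # step down dC/dW
        obj.bias[i] -= obj.learningrate * gradient[i]["bias"]       # step down dC/db

# usage: a zero gradient leaves the network unchanged
net = Net([3, 4, 3])
grads = [{"weight": np.zeros_like(w), "bias": np.zeros_like(b)}
         for w, b in zip(net.weights, net.bias)]
mutateProps(net, len(net.bias), grads)

Mutating obj in place, as the diff does, avoids allocating a fresh parameter list on every training round, which is why mutateProps no longer returns anything.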
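The comments in learn() describe estimating the gradient numerically: nudge the parameters by a small theta and read the slope off the change in cost. A standard per-entry finite-difference version of that idea is sketched below; cost_fn is a placeholder for a forward pass plus a cost function, not an API from this repo. Note that shifting every weight at once by theta, as the comment suggests, gives a single directional difference, whereas the per-entry loop here recovers the full dC/dW.

import numpy as np

def finite_diff_grad(cost_fn, W, theta=0.01):
    # dC/dW[i] ~= (C(W + theta * e_i) - C(W)) / theta, one entry at a time
    base = cost_fn(W)
    grad = np.zeros_like(W)
    for idx in np.ndindex(*W.shape):
        W_shift = W.copy()
        W_shift[idx] += theta   # perturb a single entry by theta
        grad[idx] = (cost_fn(W_shift) - base) / theta
    return grad

# sanity check: the gradient of C(W) = sum(W**2) should be ~2W
W = np.array([[0.2, -0.4], [0.1, 0.3]])
print(finite_diff_grad(lambda w: np.sum(w**2), W, theta=1e-4))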