Backprop and gradient stuff

pull/1/head
E. Almqvist 4 years ago
parent 788ee4fb2d
commit fc779f1afd
  1. rgbAI/lib/func.py (20 changed lines)
  2. rgbAI/main.py (2 changed lines)

rgbAI/lib/func.py

@@ -80,15 +80,12 @@ class AIlib:
             if( newLayer <= maxLayer ):
                 return AIlib.gradient( inp, obj, theta, maxLayer, newLayer, grads, obj1, obj2 )
             else:
-                return grads
+                return grads, cost2

-    def mutateProp( prop:list, lr:float, gradient ):
-        newProp = [None] * len(prop)
-        for i in range(len(prop)):
-            newProp[i] = prop[i] - (lr*gradient)
-        return newProp
+    def mutateProps( obj, maxLen:int, gradient:list ):
+        for i in range(maxLen):
+            obj.weights[i] -= obj.learningrate * gradient[i]["weight"] # mutate the weights
+            obj.bias[i] -= obj.learningrate * gradient[i]["bias"]

     def learn( inp:np.array, obj, theta:float ):
         # Calculate the derivative for:
@@ -97,7 +94,8 @@ class AIlib:
         # i.e. : W' = W - lr * gradient (respect to W in layer i) = W - lr*[ dC / dW[i] ... ]
         # So if we change all the weights with i.e. 0.01 = theta, then we can derive the gradient with math and stuff
-        grads = AIlib.gradient( inp, obj, theta, len(obj.bias) - 1 )
-        print("####\n\n\n\n")
-        print(grads)
+        maxLen = len(obj.bias)
+        grads, cost = AIlib.gradient( inp, obj, theta, maxLen - 1 )
+        AIlib.mutateProps( obj, maxLen, grads ) # mutate the props for next round
+        print("Cost:", cost)

rgbAI/main.py

@@ -55,7 +55,7 @@ class rgb(object):
     def init():
         bot = rgb()
-        bot.traintimes = 1
+        bot.traintimes = 10
         bot.train()
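The main.py hunk only raises bot.traintimes from 1 to 10, i.e. ten learn rounds instead of one. Reusing the hypothetical helpers from the sketch above, a driver loop in that spirit would look like the following; this is an assumption about how traintimes is consumed, not the repo's actual train() code, and the input/target values are placeholders.

net = NetworkState([3, 4, 3])
inp = np.random.rand(1, 3)       # placeholder input, not the repo's rgb data
target = np.random.rand(1, 3)

traintimes = 10                  # was 1 before this commit
for round_no in range(traintimes):
    grads, c = finite_difference_gradient(net, inp, target, theta=0.01)
    mutate_props(net, grads)     # mutate the props for the next round
    print("Cost:", c)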
