From c1de21eaad44a62a55ec58ac1c4a3b8ddfa7b7e5 Mon Sep 17 00:00:00 2001
From: "E. Almqvist"
Date: Fri, 2 Oct 2020 11:00:30 +0200
Subject: [PATCH] Stuff

---
 rgbAI/lib/func.py | 3 +--
 rgbAI/main.py     | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/rgbAI/lib/func.py b/rgbAI/lib/func.py
index 898558c..abcd950 100644
--- a/rgbAI/lib/func.py
+++ b/rgbAI/lib/func.py
@@ -98,9 +98,8 @@ class AIlib:
         # i.e. : W' = W - lr * gradient (respect to W in layer i) = W - lr*[ dC / dW[i] ... ]
         # So if we change all the weights with i.e. 0.01 = theta, then we can derive the gradient with math and stuff
 
-        inp = np.asarray(np.random.rand( 1, inputNum ))[0] # create a random learning sample
-
         while( not curCost or curCost > targetCost ): # targetCost is the target for the cost function
+            inp = np.asarray(np.random.rand( 1, inputNum ))[0] # create a random learning sample
             maxLen = len(obj.bias)
             grads, res, curCost = AIlib.gradient( inp, obj, theta, maxLen - 1 )
             obj = AIlib.mutateProps( obj, maxLen, grads ) # mutate the props for next round
diff --git a/rgbAI/main.py b/rgbAI/main.py
index 9fd8992..e105f92 100755
--- a/rgbAI/main.py
+++ b/rgbAI/main.py
@@ -29,7 +29,7 @@ class rgb(object):
         return cost
 
     def learn( self ):
-        ai.learn( 3, 0.01, self, 0.001 )
+        ai.learn( 3, 0.0001, self, 0.001 )
 
     def think( self, inp:np.array ):
         print("\n-Input-")
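
Note following the patch, not part of it: the func.py hunk moves the random sample generation inside the training loop, so each round trains on a fresh input rather than reusing a single sample, and main.py shrinks the tuned constant from 0.01 to 0.0001 (judging by the comment in func.py, this appears to be theta, the weight-perturbation step). For readers unfamiliar with the technique the patched comments describe, below is a minimal, self-contained sketch of finite-difference gradient descent in that spirit. It is not rgbAI's implementation: cost(), finite_difference_grad(), the single linear layer, and the toy "invert the colour" target (1 - x) are all assumed stand-ins for the repo's real forward pass and error function.

import numpy as np

def cost(W, x, target):
    # Squared error of a single linear layer, x @ W. An assumed stand-in
    # for rgbAI's real forward pass and cost.
    pred = x @ W
    return float(np.sum((pred - target) ** 2))

def finite_difference_grad(W, x, target, theta):
    # Estimate dC/dW one weight at a time: nudge the weight by theta and
    # measure how the cost moves, the "change all the weights with
    # i.e. 0.01 = theta" idea from the comment in the patched hunk.
    base = cost(W, x, target)
    grad = np.zeros_like(W)
    for idx in np.ndindex(W.shape):
        nudged = W.copy()
        nudged[idx] += theta
        grad[idx] = (cost(nudged, x, target) - base) / theta
    return grad

def learn(W, lr=0.0001, theta=0.001, target_cost=0.001, max_rounds=5000):
    cur_cost = None
    rounds = 0
    while (cur_cost is None or cur_cost > target_cost) and rounds < max_rounds:
        # A fresh random sample every round, which is the behaviour the
        # patch introduces by moving `inp = ...` inside the while loop.
        x = np.random.rand(W.shape[0])
        target = 1.0 - x  # assumed toy task: invert the colour
        grad = finite_difference_grad(W, x, target, theta)
        W = W - lr * grad  # W' = W - lr * dC/dW, as in the patched comment
        cur_cost = cost(W, x, target)
        rounds += 1
    # max_rounds guards the loop: a linear layer with no bias cannot
    # represent 1 - x exactly, so the cost may never dip below target_cost.
    return W, cur_cost

if __name__ == "__main__":
    W0 = np.random.rand(3, 3)
    W_trained, final_cost = learn(W0)
    print(f"cost after training: {final_cost:.6f}")

The design choice the patch makes matters here: training repeatedly on one fixed random sample only fits the network to that single point, while drawing a new sample each round approximately minimises the expected cost over the whole input distribution.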