From ed0751a110f1ba24fc56f05e375902bc6cb4c67c Mon Sep 17 00:00:00 2001
From: "E. Almqvist"
Date: Thu, 22 Oct 2020 12:58:12 +0000
Subject: [PATCH] Tweaked inputs for training

---
 rgbAI/lib/ailib/ai.py | 4 ++--
 rgbAI/main.py         | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/rgbAI/lib/ailib/ai.py b/rgbAI/lib/ailib/ai.py
index cc7e138..bead533 100644
--- a/rgbAI/lib/ailib/ai.py
+++ b/rgbAI/lib/ailib/ai.py
@@ -177,7 +177,7 @@ def printProgress():
     print(f"Gen: {DEBUG_BUFFER['gen']}")
     print(f"inp: {DEBUG_BUFFER['inp']} | pre: {DEBUG_BUFFER['predicted']} cor: {DEBUG_BUFFER['correct']}")
 
-def learn( inputNum:int, targetCost:float, obj, theta:float, curCost: float=None ):
+def learn( inputNum:int, obj, theta:float, traintimes:int ):
     # Calculate the derivative for:
     # Cost in respect to weights
     # Cost in respect to biases
@@ -186,7 +186,7 @@ def learn( inputNum:int, targetCost:float, obj, theta:float, curCost: float=None
     # So if we change all the weights with i.e. 0.01 = theta, then we can derive the gradient with math and stuff
 
     count = 0
-    while( count <= 10000 ): # targetCost is the target for the cost function
+    while( count <= traintimes ): # train for 'traintimes' iterations
         count += 1
         inp = np.asarray(np.random.rand( 1, inputNum ))[0] # create a random learning sample
         # inp = np.asarray([1.0, 1.0, 1.0])
diff --git a/rgbAI/main.py b/rgbAI/main.py
index 7368cb4..4abdc81 100755
--- a/rgbAI/main.py
+++ b/rgbAI/main.py
@@ -27,7 +27,7 @@ class rgb(object):
         return cost
 
     def learn( self ):
-        ai.learn( 3, 0.001, self, 0.001 )
+        ai.learn( 3, self, 0.001, 10000 )
 
     def think( self, inp:np.array ):
         print("\n-Input-")
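
Not part of the patch: the comments in learn() above describe deriving the cost with respect to the weights and biases by nudging them with theta (e.g. 0.01). Below is a minimal sketch of that finite-difference idea; the helper name numerical_gradient and the cost callback are hypothetical stand-ins for illustration, not the repository's actual API.

    import numpy as np

    def numerical_gradient(cost, weights: np.ndarray, theta: float = 0.01) -> np.ndarray:
        """Approximate dCost/dW by nudging each weight by theta (hypothetical helper)."""
        base = cost(weights)                        # cost at the current weights
        grad = np.zeros_like(weights, dtype=float)
        for idx in np.ndindex(weights.shape):
            nudged = weights.copy()
            nudged[idx] += theta                    # perturb a single weight by theta
            grad[idx] = (cost(nudged) - base) / theta
        return grad

    # Usage sketch: gradient of a squared-error cost at a random 3x3 weight matrix.
    w = np.random.rand(3, 3)
    target = np.ones((3, 3))
    grad = numerical_gradient(lambda m: float(np.sum((m - target) ** 2)), w, theta=0.001)

With theta = 0.001 (the value main.py passes to ai.learn), this one-sided difference's approximation error shrinks roughly linearly in theta.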