Tweaked inputs for training

master
E. Almqvist 4 years ago
parent bd066bf00a
commit ed0751a110
  rgbAI/lib/ailib/ai.py  (4 changed lines)
  rgbAI/main.py  (2 changed lines)

@@ -177,7 +177,7 @@ def printProgress():
     print(f"Gen: {DEBUG_BUFFER['gen']}")
     print(f"inp: {DEBUG_BUFFER['inp']} | pre: {DEBUG_BUFFER['predicted']} cor: {DEBUG_BUFFER['correct']}")
-def learn( inputNum:int, targetCost:float, obj, theta:float, curCost: float=None ):
+def learn( inputNum:int, obj, theta:float, traintimes:int ):
     # Calculate the derivative for:
     # Cost with respect to the weights
     # Cost with respect to the biases
@@ -186,7 +186,7 @@ def learn( inputNum:int, targetCost:float, obj, theta:float, curCost: float=None ):
     # So if we change all the weights by e.g. theta = 0.01, then we can derive the gradient numerically
     count = 0
-    while( count <= 10000 ): # targetCost is the target for the cost function
+    while( count <= traintimes ): # traintimes caps the number of training iterations
         count += 1
         inp = np.asarray(np.random.rand( 1, inputNum ))[0] # create a random learning sample
         # inp = np.asarray([1.0, 1.0, 1.0])

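The comments in the hunk above describe a finite-difference scheme: perturb the weights by theta and read the gradient off the resulting change in cost. A minimal standalone sketch of that idea follows; cost_func, approx_gradient, and the forward-difference form are illustrative assumptions, not code from this repo.

import numpy as np

def approx_gradient( cost_func, weights: np.ndarray, theta: float = 0.01 ) -> np.ndarray:
    # Forward-difference estimate of dCost/dWeight: nudge each weight by
    # theta and measure how much the cost moves (hypothetical helper,
    # not the repo's implementation).
    base = cost_func( weights )
    grad = np.zeros_like( weights )
    for i in range( weights.size ):
        nudged = weights.copy()
        nudged.flat[i] += theta
        grad.flat[i] = ( cost_func( nudged ) - base ) / theta
    return grad

# A descent step would then look like:
#   weights -= learning_rate * approx_gradient( cost_func, weights )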
@@ -27,7 +27,7 @@ class rgb(object):
         return cost

     def learn( self ):
-        ai.learn( 3, 0.001, self, 0.001 )
+        ai.learn( 3, self, 0.001, 10000 )

     def think( self, inp:np.array ):
         print("\n-Input-")

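For reference, here is how the updated call in main.py maps onto the new signature in ai.py; the inline annotations are editorial, not from the repo.

# def learn( inputNum:int, obj, theta:float, traintimes:int )
ai.learn( 3, self, 0.001, 10000 )
#         |  |     |      '- traintimes: iteration count, previously hard-coded in ai.py's loop
#         |  |     '- theta: perturbation step for the numeric gradient
#         |  '- obj: the rgb instance being trained
#         '- inputNum: width of each random input sample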