Finished the main AI "think" (forward-pass) logic. Next step is to add backpropagation, learning, etc.

pull/1/head
E. Almqvist 4 years ago
parent de4e9d2e41
commit 0f40b6e8ae
  1. 6
      rgbAI/lib/func.py
  2. 18
      rgbAI/main.py

@ -5,13 +5,13 @@ class AIlib:
return 1/(1 + np.exp(-x))
def correctFunc(inp:np.array): # generates the correct answer for the AI
    """Return the target output for *inp*: the three RGB channels reversed.

    The AI's training goal is to invert an RGB triple, so the "correct"
    answer for [r, g, b] is [b, g, r].
    """
    # NOTE: must index the *parameter* `inp` (an earlier revision referenced an
    # undefined `rgb` name and forgot the list brackets around the elements).
    return np.array( [inp[2], inp[1], inp[0]] ) # basically invert the rgb values
def calcCost( inp:np.array, out:np.array ): # cost function, lower -> good, higher -> bad, bad bot, bad
sumC = 0
outLen = len(out)
correctOut = correctFunc(inp) # the "correct" output
correctOut = AIlib.correctFunc(inp) # the "correct" output
for i in range(outLen):
sumC += (out[i] - correctOut[i])**2 # get the difference of every value
@ -43,7 +43,7 @@ class AIlib:
if( layerIndex < maxLayer ):
return AIlib.think( layer, weights, bias, layerIndex + 1 )
else:
return layer
return np.squeeze(np.asarray(layer))
except (ValueError, IndexError) as err:
print("\n---------")

@ -14,14 +14,22 @@ class rgb(object):
self.bias = [ ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 3) ]
# This doesn't look very good, but it works so...
self.generation = 0
else: # if we want to load our progress from before then this would do it
self.weights = loadedWeights
self.bias = loadedBias
def calcError( self, inp:np.array, out:np.array ):
    """Compute, print, and return the cost for one input/output pair.

    Delegates to the module-level AI helper's cost function (lower is
    better). Returns the cost so callers that assign the result (e.g.
    `cost = bot.calcError(...)` in init) receive a value instead of None.
    """
    cost = ai.calcCost( inp, out )
    print(cost)
    # Cost needs to get to 0; backpropagation (not yet implemented) will
    # eventually drive it there.
    return cost
def learn(self):
    """Placeholder for the backpropagation/learning step (not implemented).

    Declared with `self` so it is callable as an instance method
    (`bot.learn()`); the original stub omitted it and would raise
    TypeError when invoked on an instance.
    """
    # TODO: implement weight/bias updates via backpropagation.
    print("learn")
def think(self, inp:np.array):
print("-----Gen " + str(self.generation) + "------")
print("\n-Input-")
print(inp)
print("\n")
@ -30,9 +38,15 @@ class rgb(object):
print("\n-Output-")
print(res)
print("\n----------------\n\n")
return res
def init(): # init
    """Entry point: build a bot, run one forward pass, and report its cost."""
    bot = rgb()
    inpArr = np.array( [0.2, 0.4, 0.8] )
    res = bot.think( inpArr )
    # calcError prints the cost; keep the returned value for future use
    # (e.g. feeding the upcoming learn/backpropagation step).
    cost = bot.calcError(inpArr, res)

init()

Loading…
Cancel
Save