Made code pep8 compliant

pull/2/merge^2
Alve 4 years ago
parent 2925776052
commit 118b13c971
1. rgbAI/lib/func.py (295 changed lines)
2. rgbAI/main.py (71 changed lines)

rgbAI/lib/func.py
@@ -1,148 +1,163 @@

import numpy as np
from copy import deepcopy as copy


class AIlib:
    def sigmoid(x):
        return 1/(1 + np.exp(-x))

    def correctFunc(inp: np.array):  # generates the correct answer for the AI
        # basically invert the rgb values
        return np.asarray([1.0 - inp[0], 1.0 - inp[1], 1.0 - inp[2]])

    # cost function, lower -> good, higher -> bad, bad bot, bad
    def calcCost(predicted: np.array, correct: np.array):
        costSum = 0
        maxLen = len(correct)
        for i in range(maxLen):
            costSum += abs((predicted[i] - correct[i]))
        return costSum / maxLen

    def getThinkCost(inp: np.array, predicted: np.array):
        corr = AIlib.correctFunc(inp)
        return AIlib.calcCost(predicted, corr)

    # generate a matrix with x, y dimensions with random values from min-max in it
    def genRandomMatrix(x: int, y: int, min: float = 0.0, max: float = 1.0):
        # TODO: apply the min-max range with * and -
        mat = np.random.rand(x, y) - 0.25
        return mat

    def think(inp: np.array, obj, layerIndex: int = 0):  # recursive thinking, hehe
        maxLayer = len(obj.weights) - 1
        # dot multiply the input and the weights
        weightedLayer = np.dot(inp, obj.weights[layerIndex])
        layer = AIlib.sigmoid(
            np.add(weightedLayer, obj.bias[layerIndex]))  # add the biases

        if(layerIndex < maxLayer):
            return AIlib.think(layer, obj, layerIndex + 1)
        else:
            out = np.squeeze(np.asarray(layer))
            return out

    def propDer(dCost, dProp):
        # Calculate the partial derivative for that prop
        return dCost / dProp

    def compareAIobjects(inp, obj1, obj2):
        # Compare the two instances
        res1 = AIlib.think(inp, obj1)
        cost1 = AIlib.getThinkCost(inp, res1)  # get the cost

        res2 = AIlib.think(inp, obj2)
        cost2 = AIlib.getThinkCost(inp, res2)  # get the second cost

        # Actually calculate stuff
        dCost = cost2 - cost1
        return dCost, cost1

    def compareInstanceWeight(obj, inp, theta: float, layerIndex: int, neuronIndex_X: int, neuronIndex_Y: int):
        # Create a new instance of the object
        obj2 = copy(obj)  # annoying way to create a new instance of the object
        # mutate the second object's neuron
        obj2.weights[layerIndex][neuronIndex_X][neuronIndex_Y] += theta
        # compare the two and get the dCost with respect to the weights
        dCost, curCost = AIlib.compareAIobjects(inp, obj, obj2)
        return dCost, curCost

    def compareInstanceBias(obj, inp, theta: float, layerIndex: int, biasIndex: int):
        obj2 = copy(obj)
        # do the same thing for the bias
        obj2.bias[layerIndex][0][biasIndex] += theta
        dCost, curCost = AIlib.compareAIobjects(inp, obj, obj2)
        return dCost, curCost

    def getChangeInCost(obj, inp, theta, layerIndex):
        mirrorObj = copy(obj)

        # Fill the buffers with zero placeholders that the dCost values replace later
        dCost_W = np.zeros(shape=mirrorObj.weights[layerIndex].shape)
        dCost_B = np.zeros(shape=mirrorObj.bias[layerIndex].shape)

        # Get the cost change for the weights
        weightLenX = len(dCost_W)
        weightLenY = len(dCost_W[0])

        for x in range(weightLenX):  # get the dCost for each x,y
            for y in range(weightLenY):
                dCost_W[x][y], curCostWeight = AIlib.compareInstanceWeight(
                    obj, inp, theta, layerIndex, x, y)

        # Get the cost change for the biases
        biasLenY = len(dCost_B[0])
        for index in range(biasLenY):
            dCost_B[0][index], curCostBias = AIlib.compareInstanceBias(
                obj, inp, theta, layerIndex, index)

        return dCost_W, dCost_B, (curCostBias + curCostWeight)/2

    # Calculate the gradient for that prop
    def gradient(inp: np.array, obj, theta: float, maxLayer: int, layerIndex: int = 0, grads=None, obj1=None, obj2=None):
        # Check if grads exists, if not create the buffer
        if(not grads):
            grads = [None] * (maxLayer+1)

        dCost_W, dCost_B, meanCurCost = AIlib.getChangeInCost(
            obj, inp, theta, layerIndex)

        # Calculate the gradient for the layer
        weightDer = AIlib.propDer(dCost_W, theta)
        biasDer = AIlib.propDer(dCost_B, theta)

        # Append the gradients to the list
        grads[layerIndex] = {
            "weight": weightDer,
            "bias": biasDer
        }

        newLayer = layerIndex + 1
        if(newLayer <= maxLayer):
            return AIlib.gradient(inp, obj, theta, maxLayer, newLayer, grads, obj1, obj2)
        else:
            return grads, meanCurCost

    def mutateProps(inpObj, maxLen: int, gradient: list):
        obj = copy(inpObj)
        for i in range(maxLen):
            obj.weights[i] -= obj.learningrate * \
                gradient[i]["weight"]  # mutate the weights
            obj.bias[i] -= obj.learningrate * gradient[i]["bias"]
        return obj

    def learn(inputNum: int, targetCost: float, obj, theta: float, curCost: float = None):
        # Calculate the derivative for:
        # Cost with respect to weights
        # Cost with respect to biases
        # i.e. : W' = W - lr * gradient (with respect to W in layer i) = W - lr*[ dC / dW[i] ... ]
        # So if we change all the weights by e.g. 0.01 = theta, then we can derive the gradient numerically

        inp = np.asarray(np.random.rand(1, inputNum))[
            0]  # create a random learning sample

        # targetCost is the target for the cost function
        while(not curCost or curCost > targetCost):
            maxLen = len(obj.bias)
            grads, curCost = AIlib.gradient(inp, obj, theta, maxLen - 1)
            # mutate the props for next round
            obj = AIlib.mutateProps(obj, maxLen, grads)
            print(f"Cost: {curCost}")

        print("DONE\n")
        print(obj.weights)
        print(obj.bias)
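The gradient above is estimated numerically rather than with analytic backpropagation: compareInstanceWeight nudges a single weight by theta, compareAIobjects measures the resulting change in cost, and propDer divides that change by theta, giving the forward difference (C(w + theta) - C(w)) / theta. A minimal self-contained sketch of the same idea with a toy cost function (the names cost and finite_diff_grad are illustrative, not from this repo):

import numpy as np


def cost(w: np.ndarray) -> float:
    # toy stand-in for getThinkCost: mean absolute error against a fixed target
    target = np.asarray([0.5, -0.25, 1.0])
    return float(np.mean(np.abs(w - target)))


def finite_diff_grad(w: np.ndarray, theta: float = 0.001) -> np.ndarray:
    # nudge one parameter at a time, measure the cost change, divide by theta;
    # this is compareInstanceWeight + propDer in miniature
    base = cost(w)
    grad = np.zeros_like(w)
    for i in range(len(w)):
        nudged = w.copy()
        nudged[i] += theta
        grad[i] = (cost(nudged) - base) / theta  # dC / dw[i]
    return grad


w = np.zeros(3)
for _ in range(300):
    w -= 0.1 * finite_diff_grad(w)  # W' = W - lr * gradient
print(w)  # settles near [0.5, -0.25, 1.0]

getChangeInCost runs the same loop over every weight and bias of one layer, which is why each training step costs two full forward passes per parameter (the baseline cost is recomputed on every comparison).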

rgbAI/main.py
@@ -2,52 +2,57 @@

import numpy as np
from lib.func import AIlib as ai


class rgb(object):
    def __init__(self, loadedWeights: np.matrix = None, loadedBias: np.matrix = None):
        if(not loadedWeights or not loadedBias):  # if one is null (None) then just generate new ones
            print("Generating weights and biases...")
            self.weights = [ai.genRandomMatrix(3, 8), ai.genRandomMatrix(
                8, 8), ai.genRandomMatrix(8, 3)]  # array of matrices of weights
            # 3 input neurons -> 8 hidden neurons -> 8 hidden neurons -> 3 output neurons

            # Generate the biases
            self.bias = [ai.genRandomMatrix(1, 8), ai.genRandomMatrix(
                1, 8), ai.genRandomMatrix(1, 3)]
            # This doesn't look very good, but it works so...

            self.learningrate = 0.01  # the learning rate of this ai

            print(self.weights)
            print(self.bias)
        else:  # if we want to load our progress from before then this would do it
            self.weights = loadedWeights
            self.bias = loadedBias

    def calcError(self, inp: np.array, out: np.array):
        cost = ai.calcCost(inp, out)
        # Cost needs to get to 0, we can figure out this with backpropagation
        return cost

    def learn(self):
        ai.learn(3, 0.0001, self, 0.001)

    def think(self, inp: np.array):
        print("\n-Input-")
        print(inp)

        res = ai.think(inp, self)

        print("\n-Output-")
        print(res)
        return res


def init():
    bot = rgb()
    bot.learn()

    inpArr = np.asarray([1.0, 1.0, 1.0])
    res = bot.think(inpArr)
    err = bot.calcError(inpArr, res)
    print(err)


init()
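The loadedWeights/loadedBias parameters suggest saving and restoring a trained bot was planned but is not wired up in this commit. A possible pair of helpers, sketched as if added to main.py and assuming the list-of-matrices layout above (save_bot, load_bot and the .npz path are hypothetical, not part of the repo):

def save_bot(bot, path="rgbAI_state.npz"):
    # persist each weight/bias matrix under an indexed key
    arrays = {f"w{i}": m for i, m in enumerate(bot.weights)}
    arrays.update({f"b{i}": m for i, m in enumerate(bot.bias)})
    np.savez(path, **arrays)


def load_bot(path="rgbAI_state.npz"):
    data = np.load(path)
    n = len(data.files) // 2  # half the arrays are weights, half biases
    weights = [data[f"w{i}"] for i in range(n)]
    bias = [data[f"b{i}"] for i in range(n)]
    bot = rgb(loadedWeights=weights, loadedBias=bias)
    bot.learningrate = 0.01  # the load branch of __init__ never sets this
    return bot

Note the last line: the else branch in __init__ only restores weights and bias, so a loaded bot would hit an AttributeError in ai.mutateProps unless learningrate is set separately.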
