Fixed thinking again

pull/1/head
E. Almqvist 4 years ago
parent e96525e4ff
commit 0a4ff885e2
  1. lib/func.py (9 changed lines)
  2. main.py (11 changed lines)

lib/func.py
@@ -32,12 +32,13 @@ class AIlib:
             exit()
         try:
             print("Think " + str(layerIndex))
-            weightedInput = np.dot( weights[layerIndex], inp ) # dot multiply the input and the weights
+            weightedInput = np.dot( inp, weights[layerIndex] ) # dot multiply the input and the weights
             layer = AIlib.sigmoid( np.add(weightedInput, bias[layerIndex]) ) # add the biases
-            print(layer)
-            print("\n")
-            if( layerIndex < maxLayer ):
+            print("Layer " + str(layerIndex))
+            print(layer)
+            print("\n")
+            if( layerIndex < maxLayer ):
                 return AIlib.think( layer, weights, bias, layerIndex + 1 )
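Note on the reordered dot product: with the new (inputs, outputs) weight layout introduced in the main.py hunk below, a row-vector activation of shape (1, n) has to be multiplied as np.dot(inp, weights[layerIndex]) to produce the next (1, m) activation. Below is a minimal sketch of the recursive forward pass this hunk implements; maxLayer and the final-layer return are not visible in the hunk and are assumed here:

import numpy as np

def sigmoid(x):                                  # stand-in for AIlib.sigmoid
    return 1 / (1 + np.exp(-x))

def think(inp, weights, bias, layerIndex=0):
    maxLayer = len(weights) - 1                  # assumed; defined outside this hunk
    weightedInput = np.dot(inp, weights[layerIndex])           # (1, n) @ (n, m) -> (1, m)
    layer = sigmoid(np.add(weightedInput, bias[layerIndex]))   # add the (1, m) bias row
    if layerIndex < maxLayer:
        return think(layer, weights, bias, layerIndex + 1)     # recurse into the next layer
    return layer                                 # assumed base case: return the output activations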

main.py
@@ -7,11 +7,11 @@ class rgb(object):
         if( not loadedWeights or not loadedBias ): # if one is null (None) then just generate new ones
             print("Generating weights and biases...")
-            self.weights = [ ai.genRandomMatrix(4, 3), ai.genRandomMatrix(4, 4), ai.genRandomMatrix(3, 4) ] # array of matrices of weights
+            self.weights = [ ai.genRandomMatrix(3, 4), ai.genRandomMatrix(4, 4), ai.genRandomMatrix(4, 3) ] # array of matrices of weights
             # 3 input neurons -> 4 hidden neurons -> 4 hidden neurons -> 3 output neurons
             # Generate the biases
-            self.bias = [ ai.genRandomMatrix(4, 1), ai.genRandomMatrix(4, 1), ai.genRandomMatrix(3, 1) ]
+            self.bias = [ ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 3) ]
             # This doesn't look very good, but it works so...
         else: # if we want to load our progress from before then this would do it
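With the transposed layout each weight matrix is (inputs, outputs) and each bias is a (1, outputs) row vector, so the shapes chain cleanly through the 3 -> 4 -> 4 -> 3 architecture. A quick shape check, using a hypothetical stand-in for ai.genRandomMatrix (assumed here to return a rows x cols random matrix):

import numpy as np

def genRandomMatrix(rows, cols):                 # hypothetical stand-in for ai.genRandomMatrix
    return np.random.rand(rows, cols)

weights = [genRandomMatrix(3, 4), genRandomMatrix(4, 4), genRandomMatrix(4, 3)]
bias    = [genRandomMatrix(1, 4), genRandomMatrix(1, 4), genRandomMatrix(1, 3)]

x = np.random.rand(1, 3)                         # 3 input neurons, e.g. one RGB value
for W, b in zip(weights, bias):
    x = 1 / (1 + np.exp(-(np.dot(x, W) + b)))    # sigmoid(x @ W + b)
    print(x.shape)                               # (1, 4), then (1, 4), then (1, 3)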
@@ -22,8 +22,13 @@ class rgb(object):
print("learn")
def think(self, inp:np.array):
print("\n-Input-")
print(inp)
print("\n")
res = ai.think( inp, self.weights, self.bias )
print("\n-Result-")
print("\n-Output-")
print(res)
def init(): # init func
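The wrapper now echoes the input before printing the result under the new -Output- label. A hedged usage sketch; the rgb constructor arguments are an assumption and not part of this commit:

import numpy as np

bot = rgb(None, None)                            # assumed: no saved weights/bias, so fresh ones are generated
bot.think(np.array([[0.25, 0.5, 0.75]]))         # prints -Input-, then -Output- with a (1, 3) result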
