Added recursive layer calculation function

pull/1/head
E. Almqvist 4 years ago
parent d26c6042a0
commit ba5662b371
  1. lib/func.py (35 changed lines)
  2. main.py (27 changed lines)

@@ -4,8 +4,37 @@ class AIlib:
     def sigmoid(x):
         return 1/(1 + np.exp(-x))

-    def correctFunc(rgb): # generates the correct answer for the AI
-        return ( rgb[2], rgb[1], rgb[0] ) # basically invert the rgb values
+    def correctFunc(inp:np.array): # generates the correct answer for the AI
+        return np.array( [inp[2], inp[1], inp[0]] ) # basically invert the rgb values
+
+    def calcCost( inp:np.array, out:np.array ): # cost function, lower -> good, higher -> bad, bad bot, bad
+        sumC = 0
+        outLen = len(out)
+        correctOut = AIlib.correctFunc(inp) # the "correct" output
+        for i in range(outLen):
+            sumC += (out[i] - correctOut[i])**2 # square the difference of every value
+        return sumC / outLen # return the average cost over all outputs

-    def genRandomMatrix( x:int, y:int ): # generate a matrix with x, y dimensions with random values from 0-1 in it
-        return np.random.rand(x, y)
+    def genRandomMatrix( x:int, y:int, min: float=0.0, max: float=1.0 ): # generate a matrix with x, y dimensions with random values from min-max in it
+        return np.random.uniform( min, max, (x, y) )
+
+    def think( inp:np.matrix, weights:list, bias:list, layerIndex: int=0 ): # recursive thinking, hehe
+        # the lengths of weights and bias must be equal;
+        # if not, the neural net is flawed/incorrect
+        maxLayer = len(weights)
+        biasLen = len(bias)
+        if( maxLayer != biasLen ):
+            print("Neural Network Error: Length of weights and bias are not equal.")
+            print(f"Weights: {maxLayer}, Bias: {biasLen}")
+            exit()
+
+        weightedInput = np.dot( inp, weights[layerIndex] ) # dot multiply the input and the weights
+        layer = np.add( weightedInput, bias[layerIndex] ) # add the biases
+
+        if( layerIndex >= maxLayer - 1 ): # last layer reached, stop recursing
+            return layer
+        else:
+            return AIlib.think( layer, weights, bias, layerIndex + 1 ) # feed this layer's output into the next layer
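
For reference (not part of this commit): the recursion in think walks the weight and bias lists one layer at a time, which is equivalent to the iterative forward pass sketched below. The helper name forward is hypothetical; the shapes match the 3 -> 4 -> 4 -> 3 network set up in main.py, and like think it applies no activation yet.

import numpy as np

def forward(inp, weights, bias):
    # multiply by each weight matrix and add each bias in turn,
    # the same work AIlib.think does via recursion
    layer = np.asmatrix(inp)
    for w, b in zip(weights, bias):
        layer = np.add(np.dot(layer, w), b)
    return layer

weights = [np.random.rand(3, 4), np.random.rand(4, 4), np.random.rand(4, 3)]
bias = [np.random.rand(1, 4), np.random.rand(1, 4), np.random.rand(1, 3)]
print(forward(np.array([0.2, 0.4, 0.8]), weights, bias)) # 1x3 output matrix

Either form turns a 3-value input into a 1x3 output matrix; the recursive version just carries the layer index instead of a loop variable.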

@@ -1,29 +1,34 @@
 #!/usr/bin/env python
 import numpy as np
 from lib.func import AIlib as ai

 class rgb(object):
-    def __init__(self, loadedWeights = None, loadedBias = None):
+    def __init__(self, loadedWeights: np.matrix=None, loadedBias: np.matrix=None):
-        if( not loadedWeights or not loadedBias ):
+        if( loadedWeights is None or loadedBias is None ): # if one is null (None) then just generate new ones
             print("Generating weights and biases...")
             self.weights = [ ai.genRandomMatrix(3, 4), ai.genRandomMatrix(4, 4), ai.genRandomMatrix(4, 3) ] # array of matrices of weights
             # 3 input neurons -> 4 hidden neurons -> 4 hidden neurons -> 3 output neurons
-            # Will be needing biases too
+            # Generate the biases
+            self.bias = [ ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 3) ]
-            # This doesn't look very good, but it works so...
+            # This is all we need
         else: # if we want to load our progress from before then this would do it
             print("Loading neural net...")
             self.weights = loadedWeights
             self.bias = loadedBias

-    def think(self, inputMatrix):
-        print(self.weights)
-        print(self.bias)

     def learn():
         print("learn")

+    def think(self, inp:np.array):
+        res = ai.think( np.asmatrix(inp), self.weights, self.bias )
+        print(res)
+        # print(self.weights)
+        # print(self.bias)

 def init(): # init func
     bot = rgb()
-    bot.think(1)
+    bot.think( np.array([0.2, 0.4, 0.8]) )

 init()
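
Note (not part of this commit): calcCost is added in lib/func.py but nothing in main.py calls it yet. A minimal sketch of how it could score the network's output, assuming the same 3 -> 4 -> 4 -> 3 shapes used above:

import numpy as np
from lib.func import AIlib as ai

weights = [ ai.genRandomMatrix(3, 4), ai.genRandomMatrix(4, 4), ai.genRandomMatrix(4, 3) ]
bias = [ ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 4), ai.genRandomMatrix(1, 3) ]

inp = np.array([0.2, 0.4, 0.8])
out = np.asarray(ai.think( np.asmatrix(inp), weights, bias )).flatten() # the three output values
print(ai.calcCost(inp, out)) # average squared difference against the inverted-rgb target, lower is better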
