Post by bluatigro on Nov 14, 2018 5:28:36 GMT -5
this is why i needed a getpixel()
WARNING :
gethdc is not present yet,
so this does not work yet.
The net tries to separate squares and circles.
WARNING :
gethdc is not present yet,
so this does not work yet.
The net tries to separate squares and circles.
'' bluatigro 14 nov 2018
'' ann bmp
'' based on :
''http://code.activestate.com/recipes/578148-simple-back-propagation-neural-network-in-python-s/
'' neural-net dimensions ( global so the subs below can see them )
global ni , nh , no , size , hdc , hWindow
size = 63
ni = size * size
nh = size * sqr( size )
no = 1
dim ai( ni ) , ah( nh )
dim ao( no ) , wish( no )
dim wi( ni , nh )
dim wo( nh , no )
dim ci( ni , nh )
dim co( nh , no )
dim od( nh ) , hd( nh )
open "ann bmp" for graphics as #m
#m "trapclose [quit]"
'' fetch the window's device context so getpixel() can read pixels back
'' ( the original used an undefined hdc ; this is the missing "gethdc" )
hWindow = hwnd( #m )
calldll #user32 , "GetDC" _
, hWindow as ulong _
, hdc as ulong
call init
'' let the net live and learn on random squares / circles
for e = 0 to 1000
scan
dice = drawsquare()
'' fill input cells
'' loop runs 0 .. size - 1 so in( x , y ) stays inside ai()
'' ( the original looped to size and indexed past the array )
for x = 0 to size - 1
for y = 0 to size - 1
'' NOTE(review) : assumes dark shapes on a nonzero background -- confirm
if getpixel( hdc , x , y ) then
ai( in( x , y ) ) = 1
else
ai( in( x , y ) ) = -1
end if
next y
next x
'' fill target : 1 = square , -1 = circle
wish( 0 ) = dice
call calc
fout = backprop( .5 , .5 )
#m "goto 0 " ; size + 50
#m "down"
#m "\live : " ; e ; " | error : " ; fout
#m "up"
next e
notice "Learning ready ."
'' count misclassifications on fresh shapes
fout = 0
for e = 0 to 100
scan
dice = drawsquare()
'' fill input cells
for x = 0 to size - 1
for y = 0 to size - 1
if getpixel( hdc , x , y ) then
ai( in( x , y ) ) = 1
else
ai( in( x , y ) ) = -1
end if
next y
next x
call calc
'' a square ( dice = 1 ) should give ao( 0 ) > 0 , a circle < 0
if dice > 0 then
if ao( 0 ) < 0 then
fout = fout + 1
end if
else
if ao( 0 ) > 0 then
fout = fout + 1
end if
end if
next e
#m "goto 0 " ; size + 50
#m "down"
#m "\error : " ; fout
#m "up"
notice "[ game over ]"
wait
[quit]
'' release the device context before closing the window
calldll #user32 , "ReleaseDC" _
, hWindow as ulong _
, hdc as ulong _
, ret as long
close #m
end
'' read the colour of pixel ( x , y ) from device context hDC
'' via the Windows GDI GetPixel API .
'' returns the COLORREF value ( 0 = black , nonzero otherwise ) ;
'' GetPixel returns CLR_INVALID when ( x , y ) lies outside the clip region
function getpixel( hDC , x , y )
calldll #gdi32 , "GetPixel" _
, hDC as ulong _
, x as long _
, y as long _
, getpixel as ulong
end function
'' map a grid position ( x , y ) to a flat index into ai()
'' only valid while 0 <= x <= size - 1 ( rows of length size )
function in( x , y )
in = ( y * size ) + x
end function
'' paint one random filled shape in window #m .
'' returns 1 when a square was drawn , -1 for a circle
function drawsquare()
r = irange( 10 , 30 )
x = irange( r + 5 , 63 - r - 5 )
y = irange( r + 5 , 63 - r - 5 )
if irange( 0 , 1 ) = 0 then
'' circle , centred on ( x , y )
shape = -1
#m "goto " ; x ; " " ; y
#m "down"
#m "circlefilled " ; r
#m "up"
else
'' square , drawn corner to corner
shape = 1
#m "goto " ; x - r ; " " ; y - r
#m "down"
#m "boxfilled " ; x + r ; " " ; y + r
#m "up"
end if
drawsquare = shape
end function
'' random integer in [ l , h ] , both ends inclusive
function irange( l , h )
v = range( l , h + 1 )
irange = int( v )
end function
'' random float in the half-open interval [ l , h )
function range( l , h )
range = l + ( h - l ) * rnd( 0 )
end function
'' give the net its starting state :
'' activations set to 1 , weights uniform random in [ -1 , 1 ]
'' ( loop bounds kept exactly as the rest of the program expects )
sub init
for a = 0 to ni
ai( a ) = 1
next a
for b = 0 to no - 1
ao( b ) = 1
next b
for a = 0 to ni
for b = 0 to nh - 1
wi( a , b ) = range( -1 , 1 )
next b
next a
for a = 0 to nh - 1
for b = 0 to no - 1
wo( a , b ) = range( -1 , 1 )
next b
next a
end sub
'' forward pass : input activations -> hidden layer -> output layer .
'' each unit is the squashed mean-weighted sum of the layer below .
'' NOTE(review) : loops intentionally run to nh / no INCLUSIVE ,
'' matching the indices backprop() reads -- confirm before tightening
sub calc
for j = 0 to nh
total = 0
for i = 0 to ni
total = total + ai( i ) * wi( i , j )
next i
ah( j ) = signoid( total / ni )
next j
for k = 0 to no
total = 0
for j = 0 to nh
total = total + ah( j ) * wo( j , k )
next j
ao( k ) = signoid( total / nh )
next k
end sub
'' hyperbolic tangent , numerically safe :
'' the original form exp( -2 * x ) overflows for large negative x ;
'' this version saturates to +/- 1 and uses exp( 2 * x ) only
'' where it cannot overflow
function tanh( x )
if x > 20 then
tanh = 1
else
if x < -20 then
tanh = -1
else
e2 = exp( 2 * x )
tanh = ( e2 - 1 ) / ( e2 + 1 )
end if
end if
end function
'' activation function used by calc() and backprop() :
'' simply delegates to the user-defined tanh() above
function signoid( x )
signoid = tanh( x )
end function
'' derivative of the activation , expressed in terms of the
'' activation's OUTPUT value : d/dx tanh( x ) = 1 - tanh( x ) ^ 2
function dsignoid( x )
dsignoid = 1 - ( x * x )
end function
'' one back-propagation step over the whole net .
'' n = learning rate , m = momentum factor
'' returns the summed squared output error / 2
'' http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
function backprop( n , m )
'' output deltas :
'' dE/dw[j][k] = ( t[k] - ao[k] ) * s'( ao[k] ) * ah[j]
for k = 0 to no - 1
fout = wish( k ) - ao( k )
od( k ) = fout * dsignoid( ao( k ) )
next k
'' update hidden -> output weights , with momentum on the previous step
'' BUGFIX : ah is one-dimensional ; the original read ah( nh , j )
for j = 0 to nh
for k = 0 to no - 1
c = od( k ) * ah( j )
wo( j , k ) = wo( j , k ) + n * c + m * co( j , k )
co( j , k ) = c
next k
next j
'' hidden deltas : back-propagate the output deltas through wo
'' BUGFIX : derivative is taken at the hidden activation ah( j ) ;
'' the original used ao( j ) , which only has no + 1 entries
for j = 0 to nh
fout = 0
for k = 0 to no - 1
fout = fout + od( k ) * wo( j , k )
next k
hd( j ) = fout * dsignoid( ah( j ) )
next j
'' update input -> hidden weights
for i = 0 to ni
for j = 0 to nh
c = hd( j ) * ai( i )
wi( i , j ) = wi( i , j ) _
+ n * c + m * ci( i , j )
ci( i , j ) = c
next j
next i
'' squared error over the real outputs only ( 0 .. no - 1 )
fout = 0
for k = 0 to no - 1
fout = fout _
+ ( wish( k ) - ao( k ) ) ^ 2
next k
backprop = fout / 2
end function