Signal vs Noise – target recognition using Neural Networks

  1. A Python hack implementing a 3-layer neural network. After 500 iterations of training, the network is capable of fully separating targets (non-random numeric patterns) from noise, i.e. random numeric sequences.
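
To make the two classes concrete: in the code posted in the comment below, a "noise" row is plain uniform random numbers, while a "target" row is a deterministic ramp reduced modulo -5. A minimal sketch of one row of each class (paraphrased from that code; the variable names are illustrative only):

    import numpy as np

    inputs = 101
    noise_row = np.random.rand(inputs)    # random pattern   -> class 0 (noise)
    target_row = np.arange(inputs) % -5   # structured ramp  -> class 1 (target)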


1 Response to Signal vs Noise – target recognition using Neural Networks

  1. For some reason, the code didn’t get included in the post.

    Here it is:

    # Neural network with three levels, with the number of input, hidden and
    # output nodes given by inputs, hidden, output.
    # The hidden weights are applied to the inputs, each connection having its
    # own weight; the same goes for the output weights. That is, the hidden
    # weights operate on the inputs from the level above, and the output
    # weights likewise operate on values coming from the level above.
    #
    # For details, see Ch. 26 in http://www.dspguide.com

    import numpy as np
    import matplotlib.pyplot as plt

    # params
    inputs, hidden, output = 101, 10, 1
    nr_of_learning_data = 260
    mu = 0.005
    iterations = 800

    def sigmoid(nodesum, level):
        # logistic activation, with sanity checks on the weighted sum

        if np.absolute(nodesum) > 6:
            print('nodesum out of range for Sigmoid!:', nodesum, 'level:', level)

        if np.absolute(nodesum) < 0.000000001:
            print('NODESUM too small!', nodesum, 'level', level)

        return 1. / (1. + np.exp(-nodesum))
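
    # A numerically safer variant (a sketch, not part of the original
    # recipe): clipping the summed input before exponentiating keeps np.exp
    # from overflowing on strongly negative sums, without materially changing
    # in-range values.
    def sigmoid_clipped(nodesum):
        return 1. / (1. + np.exp(-np.clip(nodesum, -30., 30.)))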

    def accumulator(layer, weights, next_layer):
        # multiplies each input with its specific weight, squashes the sum
        # through the sigmoid, and returns the next level's nodes

        level = layer.size

        for i, e in np.ndenumerate(next_layer):
            prod = layer * weights[i, :]
            next_layer[i] = sigmoid(prod.sum(), level)

        return next_layer
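
    # The loop in accumulator is a node-by-node matrix-vector product; an
    # equivalent vectorized one-liner (a sketch, assuming the same shapes)
    # would be:
    #
    #     next_layer[:] = 1. / (1. + np.exp(-weights.dot(layer)))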

    def new_weights(input, hidden, output, hw, ow, error, mu):
        # backpropagation: step every weight along its error gradient,
        # scaled by the learning rate mu

        for h, hval in np.ndenumerate(hidden):
            for i, ival in np.ndenumerate(input):
                slope_o = output[0] * (1. - output[0])
                slope_h = hidden[h] * (1. - hidden[h])
                dx3dw = input[i] * slope_h * ow[0][h] * slope_o
                hw[h, i] += dx3dw * error * mu

        for h, hval in np.ndenumerate(hidden):
            slope_o = output[0] * (1. - output[0])
            dx3dw = hidden[h] * slope_o
            ow[0][h] += dx3dw * error * mu

        return hw, ow
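
    # For reference: new_weights is the plain gradient-descent step from
    # dspguide Ch. 26. The output weights see a single sigmoid slope between
    # themselves and the error,
    #
    #     ow[0][h] += mu * error * slope_o * hidden[h]
    #
    # while each hidden weight chains further back through ow[0][h] and the
    # hidden node's own slope:
    #
    #     hw[h, i] += mu * error * slope_o * ow[0][h] * slope_h * input[i]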

    # init
    print('Initializing')
    print('Run Parameters:')
    print('inputs:', inputs, 'hidden:', hidden, 'outputs:', output)
    print('Nr learning data:', nr_of_learning_data)
    print('Iterations:', iterations)
    print('Mu:', mu)

    learning_input = np.ndarray((nr_of_learning_data, inputs))

    # first half is random noise
    learning_input[0:nr_of_learning_data // 2, :] = np.random.rand(nr_of_learning_data // 2, inputs)

    # second half is non-random: a ramp reduced modulo -5
    structured_data = np.ndarray((nr_of_learning_data // 2, inputs))
    structured_data[0:, ] = np.arange(nr_of_learning_data // 2 * inputs).reshape(nr_of_learning_data // 2, inputs)
    structured_data[0:, ] %= -5
    learning_input[nr_of_learning_data // 2:, :] = structured_data

    # bias node: the last input of every row is pinned to 1
    learning_input[:, -1] = 1.
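
    # Pinning one input to a constant means its weight acts as a per-node
    # threshold (bias), so no separate bias term is needed in new_weights.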

    hidden_weights = np.random.rand(hidden, inputs)
    hidden_weights /= (inputs * 10000000000.)  # start the hidden weights off tiny

    output_weights = np.random.rand(output, hidden)
    output_weights /= 1

    hidden_layer = np.ndarray(hidden)
    output_layer = np.ndarray(output)

    for i in range(iterations):
        esum = 0

        # run each set of learning data elements through the network

        for learning_data_streams in range(nr_of_learning_data):
            input_layer = learning_input[learning_data_streams]
            hidden_layer = accumulator(input_layer, hidden_weights, hidden_layer)
            output_layer = accumulator(hidden_layer, output_weights, output_layer)

            if learning_data_streams < nr_of_learning_data // 2:
                correct = 0  # random elements stored in first half of array
            else:
                correct = 1  # non-random

            error = correct - output_layer[0]  # error value for current data element

            if correct:
                error = error * 5  # give more weight to target errors than non-target errors
            esum += error * error

            hidden_weights, output_weights = new_weights(input_layer, hidden_layer, output_layer, hidden_weights, output_weights, error, mu)

        if not i % 10:
            print('iteration:', i, 'esum:', esum)

    # testrun: push a fresh batch of data through the trained network

    print('TESTRUN')
    # first half is random
    learning_input[0:nr_of_learning_data // 2, :] = np.random.rand(nr_of_learning_data // 2, inputs)

    # second half non-random
    structured_data = np.ndarray((nr_of_learning_data // 2, inputs))
    structured_data[0:, ] = np.arange(nr_of_learning_data // 2 * inputs).reshape(nr_of_learning_data // 2, inputs)
    structured_data[0:, ] %= -5  # normalize

    learning_input[nr_of_learning_data // 2:, :] = structured_data
    learning_input[:, -1] = 1.  # bias node

    output_lst = []

    for learning_data_streams in range(nr_of_learning_data):
        input_layer = learning_input[learning_data_streams]
        hidden_layer = accumulator(input_layer, hidden_weights, hidden_layer)
        output_layer = accumulator(hidden_layer, output_weights, output_layer)
        output_lst.append(output_layer[0])

    plt.plot(range(nr_of_learning_data), output_lst, marker='x')
    axes = plt.gca()
    axes.set_ylim(0, 1)

    plt.show()
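
    If training has converged, the resulting plot should show the first half of the test points (the random rows) near 0 and the second half (the structured rows) near 1, with a sharp step at the class boundary. One caveat worth noting: the test targets are built by the same arange-modulo recipe as the training targets, so this run checks that the two classes are separated, not generalization to unseen target patterns.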
