-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathfancy_mlp.py
More file actions
65 lines (47 loc) · 2.26 KB
/
fancy_mlp.py
File metadata and controls
65 lines (47 loc) · 2.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import numpy as np
def applySigmoid(x, giveMeTheDerivative = False):
    """Element-wise logistic sigmoid, s(x) = 1 / (1 + e^-x).

    Parameters
    ----------
    x : array_like
        Input value(s); NumPy broadcasting applies.
    giveMeTheDerivative : bool, optional
        When True, return the derivative of the sigmoid evaluated at x,
        i.e. s(x) * (1 - s(x)), instead of s(x) itself.

    Returns
    -------
    ndarray or float
        The sigmoid of x, or its derivative when requested.
    """
    # Evaluate the sigmoid once and reuse it; the original recursed into
    # itself twice on the derivative path, computing np.exp three times.
    sig = 1.0 / (1.0 + np.exp(-x))
    if giveMeTheDerivative:
        return sig * (1.0 - sig)
    return sig
def print_data(iter, inputs, keys, weights, prediction):
    """Log a snapshot of the training state at one iteration.

    Parameters
    ----------
    iter : int
        The iteration number being reported.
    inputs : ndarray
        The training input matrix.
    keys : ndarray
        The target/label column vector.
    weights : ndarray
        A weight matrix to display at this iteration.
    prediction : ndarray
        The network's current prediction.
    """
    # BUG FIX: the original used Python 2 print statements, which are a
    # syntax error on any supported interpreter; converted to print().
    # Also fixed the "orignal" typo in the keys message.
    print("This is iteration # ", iter)
    print("Your original input data was... \n", inputs)
    print("Your original keys were... \n", keys)
    print("Your weights at this specific iteration are... \n", weights)
    print("Our prediction at this iteration was... \n", prediction)
    print("--------------------------------------------------\n")
def train(inputs, keys, layer_one_weights, layer_two_weights):
    """Train a two-layer (one hidden layer) sigmoid network by backprop.

    Runs 20000 full-batch gradient steps. Both weight matrices are
    updated IN PLACE via +=, so the caller's arrays are modified.

    Parameters
    ----------
    inputs : ndarray, shape (n_samples, n_features)
        Training inputs (here 4x3).
    keys : ndarray, shape (n_samples, 1)
        Target outputs.
    layer_one_weights : ndarray, shape (n_features, n_hidden)
        Input -> hidden weights, mutated in place.
    layer_two_weights : ndarray, shape (n_hidden, 1)
        Hidden -> output weights, mutated in place.
    """
    # BUG FIX: xrange is Python 2 only; use range.
    for iteration in range(20000):
        # Layer one will have its own inputs and they are the ones directly given to us from main.
        layer_one_inputs = inputs
        # Forward pass: hidden layer activation.
        layer_one_prediction = applySigmoid(np.dot(layer_one_inputs, layer_one_weights))
        # Forward-propagate the hidden activation through the second layer.
        layer_two_prediction = applySigmoid(np.dot(layer_one_prediction, layer_two_weights))
        # How much were we off by?
        layer_two_error = keys - layer_two_prediction
        # BUG FIX: since prediction = sigmoid(z), sigmoid'(z) is
        # prediction * (1 - prediction). The original called
        # applySigmoid(prediction, True), which applies the sigmoid a
        # SECOND time before differentiating.
        layer_two_change_in_error = layer_two_error * (layer_two_prediction * (1 - layer_two_prediction))
        # Figure out how wrong layer one's output was by propagating
        # layer two's delta back through layer two's weights.
        layer_one_error = np.dot(layer_two_change_in_error, layer_two_weights.T)
        # BUG FIX: differentiate at layer one's ACTIVATION, not its error
        # (the original passed layer_one_error into the derivative).
        layer_one_change_in_error = layer_one_error * (layer_one_prediction * (1 - layer_one_prediction))
        # BUG FIX: each layer's gradient uses the transpose of that layer's
        # INPUT. The original used layer_one_prediction.T / layer_two_prediction.T,
        # producing (4,4) and (1,1) products that cannot even be added to the
        # (3,4) and (4,1) weight matrices (shape-mismatch crash).
        layer_one_weights += np.dot(layer_one_inputs.T, layer_one_change_in_error)
        layer_two_weights += np.dot(layer_one_prediction.T, layer_two_change_in_error)
        if iteration == 0 or iteration == 5000 or iteration == 9999:
            # BUG FIX: the original referenced undefined names
            # `weights` and `prediction` here (NameError).
            print_data(iteration, inputs, keys, layer_two_weights, layer_two_prediction)
    # BUG FIX: `prediction` was undefined; report the final output,
    # using the Python 3 print function.
    print("Output After Training:")
    print(layer_two_prediction)
def main():
    """Build the toy XOR-style dataset, seed the RNG, and train the net."""
    # Fixed seed so every run draws identical initial weights.
    np.random.seed(1)
    training_inputs = np.array([[0, 0, 1],
                                [1, 0, 1],
                                [0, 1, 1],
                                [1, 1, 1]])
    training_keys = np.array([[0, 1, 1, 0]]).T
    # Initial weights drawn uniformly from [-1, 1):
    # 3 input features -> 4 hidden units -> 1 output unit.
    hidden_weights = 2 * np.random.random((3, 4)) - 1
    output_weights = 2 * np.random.random((4, 1)) - 1
    train(training_inputs, training_keys, hidden_weights, output_weights)


if __name__ == "__main__":
    main()