A NN in pure NumPy - Source: https://www.analyticsvidhya.com/blog/2017/05/neural-network-from-scratch-in-python-and-r/
import numpy as np

# Input array
X = np.array([[1, 0, 1, 0], [1, 0, 1, 1], [0, 1, 0, 1]])
# Output
y = np.array([[1], [1], [0]])

# Sigmoid function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid function
# (expects the sigmoid's output, not the raw pre-activation)
def derivatives_sigmoid(x):
    return x * (1 - x)

# Variable initialization
epoch = 5000                      # number of training iterations
lr = 0.1                          # learning rate
inputlayer_neurons = X.shape[1]   # number of features in the data set
hiddenlayer_neurons = 3           # number of neurons in the hidden layer
output_neurons = 1                # number of neurons in the output layer

# Weight and bias initialization
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # Forward propagation
    hidden_layer_input1 = np.dot(X, wh)
    hidden_layer_input = hidden_layer_input1 + bh
    hiddenlayer_activations = sigmoid(hidden_layer_input)
    output_layer_input1 = np.dot(hiddenlayer_activations, wout)
    output_layer_input = output_layer_input1 + bout
    output = sigmoid(output_layer_input)

    # Backpropagation
    E = y - output
    slope_output_layer = derivatives_sigmoid(output)
    slope_hidden_layer = derivatives_sigmoid(hiddenlayer_activations)
    d_output = E * slope_output_layer
    Error_at_hidden_layer = d_output.dot(wout.T)
    d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer
    wout += hiddenlayer_activations.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr

print(output)
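
# A minimal usage sketch, not part of the original gist: once training has
# finished, the learned weights wh, bh, wout, bout can be reused to run a
# forward pass on unseen data. The 4-feature input below is a made-up example.
x_new = np.array([[0, 1, 1, 0]])
hidden = sigmoid(np.dot(x_new, wh) + bh)
prediction = sigmoid(np.dot(hidden, wout) + bout)
print(prediction)  # a value close to 1 means the network predicts class 1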