def model(X, Y, layers_dims, learning_rate=0.01, num_iterations=1000,
          print_cost=True, hidden_layers_activation_fn="relu",
          initialization_method="he"):
    np.random.seed(1)
    # initialize cost list
    cost_list = []
    # initialize parameters
    if initialization_method == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
def initialize_parameters_zeros(layers_dims):
    np.random.seed(1)
    parameters = {}
    L = len(layers_dims)
    for l in range(1, L):
        parameters["W" + str(l)] = np.zeros(
            (layers_dims[l], layers_dims[l - 1]))
        parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters
# Loading packages
import sys
import h5py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

sys.path.append("../scripts/")
from coding_neural_network_from_scratch import (L_model_forward,
def model_with_regularization(
        X, y, layers_dims, learning_rate=0.01, num_epochs=3000,
        print_cost=False, hidden_layers_activation_fn="relu", lambd=0):
    # get number of examples
    m = X.shape[1]
    # to get consistent output
    np.random.seed(1)
    # initialize parameters
    parameters = initialize_parameters(layers_dims)
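As a rough, self-contained illustration of how an L2 penalty of strength lambd enters both the cost and the gradients during training, here is a single-layer logistic regression on toy data (a sketch, not the author's multi-layer model; it uses the same column-per-example convention m = X.shape[1]):

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# toy data: 2 features, 200 examples, columns are examples
np.random.seed(1)
X = np.random.randn(2, 200)
y = (X[0, :] + X[1, :] > 0).astype(int).reshape(1, -1)

# single-layer "network" (logistic regression) with L2 regularization
W = np.random.randn(1, 2) * 0.01
b = np.zeros((1, 1))
learning_rate, lambd, m = 0.1, 0.7, X.shape[1]

for epoch in range(1000):
    # forward pass
    A = sigmoid(np.dot(W, X) + b)
    # cross entropy plus the L2 penalty (lambd / 2m) * ||W||^2
    cost = (-(1 / m) * np.sum(y * np.log(A) + (1 - y) * np.log(1 - A))
            + (lambd / (2 * m)) * np.sum(np.square(W)))
    # the penalty adds (lambd / m) * W to dW, exactly as in linear_backword_reg below
    dZ = A - y
    dW = (1 / m) * np.dot(dZ, X.T) + (lambd / m) * W
    db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
    # gradient descent step
    W -= learning_rate * dW
    b -= learning_rate * db

print(f"final regularized cost: {cost:.4f}")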
def linear_backword_reg(dZ, cache, lambd=0):
    A_prev, W, b = cache
    m = A_prev.shape[1]
    # the L2 penalty adds (lambd / m) * W to the weight gradient
    dW = (1 / m) * np.dot(dZ, A_prev.T) + (lambd / m) * W
    db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)
    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)
    return dA_prev, dW, db
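A quick sanity check of the backward step with random arrays (illustrative only; assumes linear_backword_reg is defined as above):

import numpy as np

np.random.seed(1)
A_prev = np.random.randn(4, 5)   # 4 units in the previous layer, 5 examples
W = np.random.randn(3, 4)        # 3 units in the current layer
b = np.zeros((3, 1))
dZ = np.random.randn(3, 5)

dA_prev, dW, db = linear_backword_reg(dZ, (A_prev, W, b), lambd=0.1)
print(dA_prev.shape, dW.shape, db.shape)   # (4, 5) (3, 4) (3, 1)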
def compute_cost_reg(AL, y, parameters, lambd=0):
    # number of examples
    m = y.shape[1]
    # compute traditional cross entropy cost
    cross_entropy_cost = compute_cost(AL, y)
    # convert parameters dictionary to vector
    parameters_vector = dictionary_to_vector(parameters)
    # compute the L2 penalty: (lambd / 2m) * sum of squared parameters
    L2_regularization_penalty = (
        lambd / (2 * m)) * np.sum(np.square(parameters_vector))
    # add the penalty to the cross entropy cost
    cost = cross_entropy_cost + L2_regularization_penalty
    return cost
# Loading packages
import sys
import h5py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

sys.path.append("../scripts/")
from coding_neural_network_from_scratch import (initialize_parameters,
# Define the multi-layer model using all the helper functions we wrote before
def L_layer_model(
        X, y, layers_dims, learning_rate=0.01, num_iterations=3000,
        print_cost=True, hidden_layers_activation_fn="relu"):
    np.random.seed(1)
    # initialize parameters
    parameters = initialize_parameters(layers_dims)
    # initialize cost list
    cost_list = []
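After training, predictions come from thresholding the output-layer activations at 0.5; a minimal sketch of an accuracy helper under that assumption (hypothetical name, not the author's code):

import numpy as np

def accuracy(probs, y):
    # threshold the output activations at 0.5 and compare with the labels
    preds = (probs >= 0.5).astype(int)
    return np.mean(preds == y) * 100

# example: 2 of 3 predictions correct -> prints 66.66666666666666
print(accuracy(np.array([[0.9, 0.2, 0.6]]), np.array([[1, 0, 0]])))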
# Import training dataset
train_dataset = h5py.File("../data/train_catvnoncat.h5")
X_train = np.array(train_dataset["train_set_x"])
y_train = np.array(train_dataset["train_set_y"])
test_dataset = h5py.File("../data/test_catvnoncat.h5")
X_test = np.array(test_dataset["test_set_x"])
y_test = np.array(test_dataset["test_set_y"])
# print the shape of input data and label vector
print(f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print(f"X_test: {X_test.shape}, y_test: {y_test.shape}")
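The image arrays load as (num_examples, height, width, channels), so a typical next step is to flatten each image into a column and rescale pixels to [0, 1], matching the column-per-example layout (m = X.shape[1]) the models above expect; a minimal sketch of that preprocessing:

# flatten each image into a column vector and rescale pixel values to [0, 1]
X_train = X_train.reshape(X_train.shape[0], -1).T / 255
X_test = X_test.reshape(X_test.shape[0], -1).T / 255

# reshape the labels into row vectors of shape (1, num_examples)
y_train = y_train.reshape(1, -1)
y_test = y_test.reshape(1, -1)

print(f"flattened: {X_train.shape}, {y_train.shape}")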
def update_parameters(parameters, grads, learning_rate):
    # number of layers (each layer has a W and a b entry)
    L = len(parameters) // 2
    # one gradient descent step for every layer's weights and biases
    for l in range(1, L + 1):
        parameters["W" + str(l)] = parameters[
            "W" + str(l)] - learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] = parameters[
            "b" + str(l)] - learning_rate * grads["db" + str(l)]
    return parameters
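A small usage example of update_parameters on a one-layer parameter set (illustrative values only):

import numpy as np

parameters = {"W1": np.array([[1.0, 2.0]]), "b1": np.array([[0.5]])}
grads = {"dW1": np.array([[0.1, -0.2]]), "db1": np.array([[0.05]])}

parameters = update_parameters(parameters, grads, learning_rate=0.1)
print(parameters["W1"])   # [[0.99 2.02]]
print(parameters["b1"])   # [[0.495]]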