FizzBuzz with keras, because why not
# These are the things we will use. Implicitly, we'll assume tensorflow is the keras backend
import keras
import keras.backend as K
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# This is necessary only because of some version incompatibility where the default
# argument to argmax is broken with some combination of Python 3.7, keras 2.1.3 (or 2.1.6),
# and tensorflow 1.12. So we'll just reimplement it until the demons of dependency hell
# are vanquished once and for all
def get_categorical_accuracy_keras(y_true, y_pred):
    return K.mean(K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)))
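
# A quick sanity check of the metric (a sketch added here, not part of the original
# gist): with two one-hot rows where the predicted argmax matches the truth exactly
# once, the metric should come out to 0.5. Uncomment to verify:
# K.eval(get_categorical_accuracy_keras(K.constant([[0., 1.], [1., 0.]]),
#                                       K.constant([[0.2, 0.8], [0.2, 0.8]])))  # -> 0.5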

# we need to do feature extraction, so what features does an integer have?
# clearly, we will take the modulus of the integer against every value from 1 to 15
# this makes complete sense and isn't absurd at all
def extract_features(i):
    return [i % (k + 1) for k in range(15)]
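
# For illustration (an example added here, not in the original gist): 15 maps to
# its residues mod 1 through 15, so the "features" of fizzbuzz's favorite number are
# extract_features(15)  # -> [0, 1, 0, 3, 0, 3, 1, 7, 6, 5, 4, 3, 2, 1, 0]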

# We'll generate our own truth, because it's 2018 and that's how the world works
def fizzbuzz(i):
    # fizzbuzz is class 3
    if i % 15 == 0:
        return 3
    # buzz is class 2
    if i % 5 == 0:
        return 2
    # fizz is class 1
    if i % 3 == 0:
        return 1
    # fallthrough is class 0
    return 0
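
# A few spot checks on our handmade truth (added here, not in the original gist):
assert fizzbuzz(9) == 1 and fizzbuzz(10) == 2 and fizzbuzz(30) == 3 and fizzbuzz(7) == 0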

# generate the training data and the labels
# obviously we don't want to train on the numbers between 1 and 100, so let's pick an
# arbitrary range. 500 to 2500 sounds excellent
data = np.array([extract_features(i) for i in range(500, 2500)])
labels = np.array([fizzbuzz(i) for i in range(500, 2500)])
dim = np.shape(data)[1]

# Convert labels to categorical one-hot encoding, as this is what keras expects
one_hot_labels = keras.utils.to_categorical(labels)
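
# For reference (a note added here, not in the original gist): to_categorical turns
# class index k into a row with a 1 in column k, so data has shape (2000, 15) and
# one_hot_labels has shape (2000, 4), e.g. class 2 -> [0., 0., 1., 0.]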

# specify the network
# why these network parameters? NO, that is the wrong question. The right question
# is "why NOT these network parameters?"
model = Sequential()
model.add(Dense(10, activation='relu', input_dim=dim))
model.add(Dense(4, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=[get_categorical_accuracy_keras])
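
# If you want to admire how tiny this network is (a suggestion added here, not in
# the original gist), model.summary() prints the layer shapes and parameter counts:
# 15 * 10 + 10 = 160 weights in the hidden layer, then 10 * 4 + 4 = 44 in the output
# model.summary()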

# Train the model, iterating on the data in batches of 64 samples
model.fit(data, one_hot_labels, epochs=50, batch_size=64, shuffle=True)

# Uncomment this block if you want to run things in jupyter to see how things did
# score = model.evaluate(data, one_hot_labels, batch_size=64)
# print(score)
# model.metrics_names

# Finally, we will see if we pass the interview!
def classify(i):
    prediction = model.predict(np.array([extract_features(i)]))
    result = np.argmax(prediction)
    if result == 1:
        return 'fizz'
    if result == 2:
        return 'buzz'
    if result == 3:
        return 'fizzbuzz'
    return str(i)

[classify(i) for i in range(1, 100 + 1)]
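
# A rough out-of-sample scorecard (a sketch added here, not in the original gist):
# training used 500..2499, so 1..100 is genuinely unseen. Compare to ground truth:
expected = [[str(i), 'fizz', 'buzz', 'fizzbuzz'][fizzbuzz(i)] for i in range(1, 101)]
predicted = [classify(i) for i in range(1, 101)]
print('%d / 100 correct on held-out inputs' % sum(p == e for p, e in zip(predicted, expected)))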