@Bill-Park
Last active February 28, 2020 06:54

rpsTutorial.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "rpsTutorial.ipynb",
"provenance": [],
"collapsed_sections": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/Bill-Park/bf4d086db213e1fae12005ccf6d77f74/rpstutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "pr6oPG81fP-v",
"colab_type": "code",
"colab": {}
},
"source": [
"%tensorflow_version 2.x\n",
"import tensorflow as tf\n",
"device_name = tf.test.gpu_device_name()\n",
"if device_name != '/device:GPU:0':\n",
" raise SystemError('GPU device not found')\n",
"print('Found GPU at: {}'.format(device_name))"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "Ett5cpuARsIL",
"colab_type": "code",
"colab": {}
},
"source": [
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"\n",
"import os\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "ZFvs98_ASBXt",
"colab_type": "code",
"colab": {}
},
"source": [
"!wget https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps.zip -O /tmp/rps.zip\n",
"!unzip /tmp/rps.zip -d /tmp"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "PWSIEDYKSo8J",
"colab_type": "code",
"colab": {}
},
"source": [
"!wget https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps-test-set.zip -O /tmp/rps-test-set.zip\n",
"!unzip /tmp/rps-test-set.zip -d /tmp"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "3nx3lkVOSyDT",
"colab_type": "code",
"colab": {}
},
"source": [
"train_dir = os.path.join(\"/\", \"tmp\", \"rps\")\n",
"\n",
"train_rock_dir = os.path.join(train_dir, \"rock\")\n",
"train_paper_dir = os.path.join(train_dir, \"paper\")\n",
"train_scissors_dir = os.path.join(train_dir, \"scissors\")"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "e1ZM7CMbS3bq",
"colab_type": "code",
"colab": {}
},
"source": [
"validation_dir = os.path.join(\"/\", \"tmp\", \"rps-test-set\")\n",
"\n",
"validation_rock_dir = os.path.join(validation_dir, \"rock\")\n",
"validation_paper_dir = os.path.join(validation_dir, \"paper\")\n",
"validation_scissors_dir = os.path.join(validation_dir, \"scissors\")"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "pi8yZj6nS8XD",
"colab_type": "code",
"colab": {}
},
"source": [
"total_train = len(os.listdir(train_rock_dir)) + len(os.listdir(train_paper_dir)) + len(os.listdir(train_scissors_dir))\n",
"total_val = len(os.listdir(validation_rock_dir)) + len(os.listdir(validation_paper_dir)) + len(os.listdir(validation_scissors_dir))\n",
"\n",
"print(\"Total training images:\", total_train)\n",
"print(\"Total validation images:\", total_val)"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "yS-TnPFGYidl",
"colab_type": "code",
"colab": {}
},
"source": [
"rock_dir = train_rock_dir\n",
"paper_dir = train_paper_dir\n",
"scissors_dir = train_scissors_dir\n",
"\n",
"rock_files = os.listdir(rock_dir)\n",
"paper_files = os.listdir(paper_dir)\n",
"scissors_files = os.listdir(scissors_dir)"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "ahRZipZoXTiy",
"colab_type": "code",
"colab": {}
},
"source": [
"%matplotlib inline\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"\n",
"pic_index = 2\n",
"\n",
"next_rock = [os.path.join(rock_dir, fname) \n",
" for fname in rock_files[pic_index-2:pic_index]]\n",
"next_paper = [os.path.join(paper_dir, fname) \n",
" for fname in paper_files[pic_index-2:pic_index]]\n",
"next_scissors = [os.path.join(scissors_dir, fname) \n",
" for fname in scissors_files[pic_index-2:pic_index]]\n",
"\n",
"for i, img_path in enumerate(next_rock+next_paper+next_scissors):\n",
" #print(img_path)\n",
" img = mpimg.imread(img_path)\n",
" plt.imshow(img)\n",
" plt.axis('Off')\n",
" plt.show()"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "JVOG8hw3YypA",
"colab_type": "code",
"colab": {}
},
"source": [
"# ImageDataGenerator was already imported from tensorflow.keras above;\n",
"# keras_preprocessing is the standalone package used by the original tutorial, imported again here.\n",
"import keras_preprocessing\n",
"from keras_preprocessing import image\n",
"from keras_preprocessing.image import ImageDataGenerator\n",
"\n",
"TRAINING_DIR = \"/tmp/rps/\""
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "B61KjJv-ZF16",
"colab_type": "code",
"colab": {}
},
"source": [
"training_datagen = ImageDataGenerator(\n",
"    rescale=1./255,\n",
"    rotation_range=40,\n",
"    width_shift_range=0.2,\n",
"    height_shift_range=0.2,\n",
"    shear_range=0.2,\n",
"    zoom_range=0.2,\n",
"    horizontal_flip=True,\n",
"    fill_mode='nearest')"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "qHnBzZ5kZJHW",
"colab_type": "code",
"colab": {}
},
"source": [
"VALIDATION_DIR = \"/tmp/rps-test-set/\"\n",
"validation_datagen = ImageDataGenerator(rescale = 1./255)"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "a0PLwOLzZLpa",
"colab_type": "code",
"colab": {}
},
"source": [
"train_generator = training_datagen.flow_from_directory(\n",
"\tTRAINING_DIR,\n",
"\ttarget_size=(150,150),\n",
"\tclass_mode='categorical'\n",
")"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "_OPIzHuoZOXC",
"colab_type": "code",
"colab": {}
},
"source": [
"validation_generator = validation_datagen.flow_from_directory(\n",
"\tVALIDATION_DIR,\n",
"\ttarget_size=(150,150),\n",
"\tclass_mode='categorical'\n",
")"
],
"execution_count": 0,
"outputs": []
},
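{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (added sketch): `flow_from_directory` assigns label indices alphabetically by folder name, so printing `class_indices` for both generators confirms that the one-hot columns refer to the same classes for training and validation."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Added sketch: confirm the index -> class-name mapping used by both generators.\n",
"print(train_generator.class_indices)\n",
"print(validation_generator.class_indices)"
],
"execution_count": 0,
"outputs": []
},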
{
"cell_type": "code",
"metadata": {
"id": "nOHswUa3Z9z6",
"colab_type": "code",
"colab": {}
},
"source": [
"model = tf.keras.models.Sequential([\n",
" # The input shape is the desired image size: 150x150 pixels with 3 color channels (RGB)\n",
" # This is the first convolution\n",
" tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),\n",
" tf.keras.layers.MaxPooling2D(2, 2),\n",
" # The second convolution\n",
" tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n",
" tf.keras.layers.MaxPooling2D(2,2),\n",
" # The third convolution\n",
" tf.keras.layers.Conv2D(128, (3,3), activation='relu'),\n",
" tf.keras.layers.MaxPooling2D(2,2),\n",
" # The fourth convolution\n",
" tf.keras.layers.Conv2D(128, (3,3), activation='relu'),\n",
" tf.keras.layers.MaxPooling2D(2,2),\n",
" # Flatten the results to feed into a DNN\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dropout(0.5),\n",
" # 512 neuron hidden layer\n",
" tf.keras.layers.Dense(512, activation='relu'),\n",
" tf.keras.layers.Dense(3, activation='softmax')\n",
"])\n",
"\n",
"model.summary()\n",
"\n",
"model.compile(loss = 'categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "3ZaLMUMbaCKU",
"colab_type": "code",
"colab": {}
},
"source": [
"history = model.fit(\n",
" train_generator, \n",
" epochs=10, \n",
" validation_data=validation_generator,\n",
" verbose=True\n",
")"
],
"execution_count": 0,
"outputs": []
},
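{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch to visualize the run: plot the accuracy and loss recorded in the `history` object returned by `model.fit` above, using the `matplotlib.pyplot` module already imported as `plt`. The key names `accuracy` and `val_accuracy` assume the TF 2.x metric naming used in this notebook."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Added sketch: plot training vs. validation accuracy and loss from `history`.\n",
"# Key names assume TF 2.x ('accuracy' / 'val_accuracy').\n",
"acc = history.history['accuracy']\n",
"val_acc = history.history['val_accuracy']\n",
"loss = history.history['loss']\n",
"val_loss = history.history['val_loss']\n",
"epochs_range = range(len(acc))\n",
"\n",
"plt.figure(figsize=(12, 4))\n",
"plt.subplot(1, 2, 1)\n",
"plt.plot(epochs_range, acc, label='Training accuracy')\n",
"plt.plot(epochs_range, val_acc, label='Validation accuracy')\n",
"plt.legend()\n",
"plt.title('Accuracy')\n",
"plt.subplot(1, 2, 2)\n",
"plt.plot(epochs_range, loss, label='Training loss')\n",
"plt.plot(epochs_range, val_loss, label='Validation loss')\n",
"plt.legend()\n",
"plt.title('Loss')\n",
"plt.show()"
],
"execution_count": 0,
"outputs": []
},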
{
"cell_type": "code",
"metadata": {
"id": "5suZ71hChRKA",
"colab_type": "code",
"colab": {}
},
"source": [
"import numpy as np\n",
"from google.colab import files\n",
"# use tf.keras's image utilities so image loading stays consistent with the tf.keras model above\n",
"from tensorflow.keras.preprocessing import image\n",
"\n",
"uploaded = files.upload()\n",
"\n",
"for fn in uploaded.keys():\n",
" \n",
" # predicting images\n",
" path = fn\n",
" img = image.load_img(path, target_size=(150, 150))\n",
" x = image.img_to_array(img)\n",
" x = np.expand_dims(x, axis=0)\n",
"\n",
" images = np.vstack([x])\n",
" classes = model.predict(images, batch_size=10)\n",
" print(fn)\n",
" print(classes)"
],
"execution_count": 0,
"outputs": []
},
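{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added sketch: `model.predict` returns softmax probabilities in the column order given by `train_generator.class_indices`, so the cell below maps the last prediction (`classes` from the cell above) to a human-readable label."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Added sketch: turn the softmax output from the previous cell into a class name.\n",
"index_to_class = {v: k for k, v in train_generator.class_indices.items()}\n",
"predicted_index = int(np.argmax(classes[0]))\n",
"print('Predicted class:', index_to_class[predicted_index])"
],
"execution_count": 0,
"outputs": []
},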
{
"cell_type": "code",
"metadata": {
"id": "b8AnbtWXWFRy",
"colab_type": "code",
"colab": {}
},
"source": [
"model.save(\"rps.h5\")\n",
"\n",
"files.download(\"rps.h5\")"
],
"execution_count": 0,
"outputs": []
}
]
}