Created February 4, 2017 17:12
Bad but fast conv example
{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "Bad but fast conv example\n=====\nbased on [this example](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network.py)"
  },
  {
   "metadata": {
    "scrolled": true,
    "collapsed": false,
    "trusted": true
   },
   "cell_type": "code",
"source": "from __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.platform import gfile\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n# Parameters\nlearning_rate = 0.005\ntraining_iters = 5000\nbatch_size = 128\ndisplay_step = 10\n\n# Network Parameters\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\ndropout = 0.75 # Dropout, probability to keep units\n\n# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_input], name='input')\ny = tf.placeholder(tf.float32, [None, n_classes])\nkeep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n\n# Create some wrappers for simplicity\ndef conv2d(x, W, b, strides=1):\n # Conv2D wrapper, with bias and relu activation\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n\ndef maxpool2d(x, k=2):\n # MaxPool2D wrapper\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n padding='SAME')\n\n\n# Create model\ndef conv_net(x, weights, biases, dropout):\n # Reshape input picture\n x = tf.reshape(x, shape=[-1, 28, 28, 1])\n\n # Convolution Layer\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n # Max Pooling (down-sampling)\n conv1 = maxpool2d(conv1, k=2)\n\n # Convolution Layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n # Max Pooling (down-sampling)\n conv2 = maxpool2d(conv2, k=2)\n\n # Fully connected layer\n # Reshape conv2 output to fit fully connected layer input\n fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n fc1 = tf.nn.relu(fc1)\n # Apply Dropout\n fc1 = tf.nn.dropout(fc1, dropout)\n\n # Output, class prediction\n out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\n return out\n\n# Store layers weight & bias\nweights = {\n # 5x5 conv, 1 input, 16 outputs\n 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 16])),\n # 5x5 conv, 16 inputs, 32 outputs\n 'wc2': tf.Variable(tf.random_normal([5, 5, 16, 32])),\n # fully connected, 7*7*32 inputs, 256 outputs\n 'wd1': tf.Variable(tf.random_normal([7*7*32, 256])),\n # 256 inputs, 10 outputs (class prediction)\n 'out': tf.Variable(tf.random_normal([256, n_classes]))\n}\n\nbiases = {\n 'bc1': tf.Variable(tf.random_normal([16])),\n 'bc2': tf.Variable(tf.random_normal([32])),\n 'bd1': tf.Variable(tf.random_normal([256])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n# Construct model\npred = conv_net(x, weights, biases, keep_prob)\nfinal_tensor = tf.nn.softmax(pred, name='final_result')\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1), name='correct_pred')\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n while step * batch_size < training_iters:\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop)\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\n 
keep_prob: dropout})\n if step % display_step == 0:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,\n y: batch_y,\n keep_prob: 1.})\n print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n step += 1\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for 256 mnist test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={x: mnist.test.images[:256],\n y: mnist.test.labels[:256],\n keep_prob: 1.}))\n \n output_graph_def = graph_util.convert_variables_to_constants(\n sess, sess.graph.as_graph_def(), ['final_result'])\n\n with gfile.FastGFile('./frozen_model.pb', 'wb') as f:\n f.write(output_graph_def.SerializeToString())", | |
"execution_count": 1, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"name": "stdout", | |
"text": "Extracting /tmp/data/train-images-idx3-ubyte.gz\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\nIter 1280, Minibatch Loss= 3652.552490, Training Accuracy= 0.36719\nIter 2560, Minibatch Loss= 1037.778564, Training Accuracy= 0.67188\nIter 3840, Minibatch Loss= 839.923706, Training Accuracy= 0.73438\nOptimization Finished!\nTesting Accuracy: 0.808594\nINFO:tensorflow:Froze 28 variables.\nConverted 8 variables to const ops.\n" | |
} | |
] | |
}, | |
{ | |
"metadata": { | |
"collapsed": true, | |
"trusted": true | |
}, | |
"cell_type": "code", | |
"source": "", | |
"execution_count": null, | |
"outputs": [] | |
} | |
], | |
"metadata": { | |
"kernelspec": { | |
"name": "python2", | |
"display_name": "Python 2", | |
"language": "python" | |
}, | |
"language_info": { | |
"mimetype": "text/x-python", | |
"nbconvert_exporter": "python", | |
"name": "python", | |
"pygments_lexer": "ipython2", | |
"version": "2.7.12", | |
"file_extension": ".py", | |
"codemirror_mode": { | |
"version": 2, | |
"name": "ipython" | |
} | |
}, | |
"gist": { | |
"id": "", | |
"data": { | |
"description": "Bad but fast conv example", | |
"public": true | |
} | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 2 | |
} |
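
The notebook freezes the trained graph to `./frozen_model.pb` with `graph_util.convert_variables_to_constants`. Below is a minimal inference sketch, not part of the original gist, showing how such a frozen graph could be loaded back with the TF 1.x API. The node names `input`, `keep_prob`, and `final_result` come from the notebook; the choice of feeding the first eight MNIST test images is purely illustrative.

```python
# Minimal sketch (assumption: the notebook above has already written
# ./frozen_model.pb containing nodes 'input', 'keep_prob', 'final_result').
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Read the serialized GraphDef produced by convert_variables_to_constants.
with gfile.FastGFile('./frozen_model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

# Import it into a fresh graph with no name-scope prefix.
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    x = graph.get_tensor_by_name('input:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    final_result = graph.get_tensor_by_name('final_result:0')

    # Softmax probabilities for a few test images; dropout is disabled
    # at inference time by feeding keep_prob = 1.0.
    probs = sess.run(final_result, feed_dict={x: mnist.test.images[:8],
                                              keep_prob: 1.0})
    print("Predicted:", probs.argmax(axis=1))
    print("Labels:   ", mnist.test.labels[:8].argmax(axis=1))
```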