diff --git a/nn_puzzle_solver.ipynb b/nn_puzzle_solver.ipynb index 7f890a2..335c1d6 100644 --- a/nn_puzzle_solver.ipynb +++ b/nn_puzzle_solver.ipynb @@ -1,12 +1,26 @@ { "cells": [ { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Setting up our imported libraries.\n", + "# Setting up our imported libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + } + ], + "source": [ "from functools import reduce\n", "import numpy as np\n", "from keras.models import Sequential\n", @@ -15,23 +29,160 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Generating dummy data.\n", - "\n", - "data = np.random.random((1000,240))\n", - "output = np.random.random((1000, 29))\n", - "\n", - "# Replace this with parser code later." + "# Function definitions" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# Used to format our input binary state.\n", + "\n", + "def format_input(acc, elem):\n", + " hex_elem = (elem - (elem >> 4 << 4))\n", + " for x in range(16):\n", + " if x == hex_elem:\n", + " acc.append(1)\n", + " else:\n", + " acc.append(0)\n", + " hex_elem = (elem >> 4) % 16\n", + " for x in range(16):\n", + " if x == hex_elem:\n", + " acc.append(1)\n", + " else:\n", + " acc.append(0)\n", + " return acc\n", + "\n", + "# Calculate Manhattan distance between two points.\n", + "\n", + "def man_dist(x, y):\n", + " for a, b in zip(x, y):\n", + " a_one, a_two = x\n", + " b_one, b_two = y\n", + " \n", + " return (abs(a_one - b_one) + abs(a_two - b_two))\n", + " \n", + "# Calculate Manhattan distance between each set of two points in a list.\n", + " \n", + "def man_dist_state(x, y):\n", + " return sum(man_dist(a, b) for a, b in zip(x, y))\n", + "\n", + "# Used to format the positions we parsed from our binary input.\n", + "\n", + "def format_pos(acc, elem):\n", + " hex_elem = (elem[1] - (elem[1] >> 4 << 4))\n", + " if hex_elem == 0:\n", + " acc.append((hex_elem, (3,3)))\n", + " else:\n", + " acc.append((hex_elem, ((15 - ((elem[0]) * 2)) % 4,int((15 - ((elem[0]) * 2)) / 4))))\n", + " hex_elem = (elem[1] >> 4) % 16\n", + " if hex_elem == 0:\n", + " acc.append((hex_elem, (3,3)))\n", + " else:\n", + " acc.append((hex_elem, ((15 - ((elem[0]) * 2 + 1)) % 4,int((15 - ((elem[0]) * 2 + 1)) / 4))))\n", + " \n", + " return acc\n", + "\n", + "# The title of this function is slightly misleading.\n", + "# I'm simply generating a list of positions that each\n", + "# puzzle piece in the current parsed state SHOULD be at.\n", + "# I organize this in order of the pieces as they were\n", + "# parsed so the two lists line up perfectly.\n", + "\n", + "def generate_pos(acc, elem):\n", + " if(elem[0] == 0):\n", + " acc.append((3,3))\n", + " else:\n", + " acc.append((((elem[0] - 1) % 4), (int((elem[0] - 1)/4))))\n", + " \n", + " return acc\n", + "\n", + "# Used to format our ending Manhattan distance into a format\n", + "# that can be compared with our 29 output neurons.\n", + "\n", + "def format_man_dist(elem):\n", + " acc = []\n", + " for x in range(28, -1, -1):\n", + " if x == elem:\n", + " acc.append(1)\n", + " else:\n", + " acc.append(0)\n", + " return acc\n" + ] + }, + { + "cell_type": "markdown", "metadata": {}, + 
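As a quick illustration of the helpers defined in the function-definitions cell above — this is a standalone sketch, not one of the notebook's cells, and the coordinates are made-up example values — `man_dist` compares one pair of (column, row) positions, `man_dist_state` sums that over a whole state, and `format_man_dist` one-hot encodes a distance across the 29 output neurons, with the 1 landing at index `28 - distance`:

```python
# Standalone sketch; assumes the function-definitions cell above has been run.
# The positions are invented examples, not values parsed from data/*.bin.

# Manhattan distance between two (col, row) coordinates.
print(man_dist((0, 0), (3, 3)))        # -> 6

# Summed Manhattan distance over paired current/goal position lists.
current = [(1, 0), (0, 0), (3, 3)]
goal    = [(0, 0), (1, 0), (3, 2)]
print(man_dist_state(current, goal))   # -> 1 + 1 + 1 = 3

# 29-way one-hot target; the loop counts x down from 28, so the 1 sits at index 28 - 3 = 25.
encoded = format_man_dist(3)
print(len(encoded), encoded.index(1))  # -> 29 25
```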
"source": [ + "# Parsing input" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "## Creating Target data\n", + "\n", + "For this cell, I wanted to clearly explain how the script past this point works. Since I don't want to parse all of the binary states from each file that I'm going to use all at once and hold them in RAM, this instead parses 1 binary state at a time (8 bytes, meaning 64 bits, and we ignore the 1st 4 bits), does the calculations and input formatting needed, and appends the end result per state to a list to be used later." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(29):\n", + " filename = join('data', str(i) + '.bin')\n", + " \n", + " # Debugging to print the current file from which states are being parsed.\n", + " #print(i)\n", + " \n", + " with open(filename, 'rb') as f:\n", + " data = f.read(8)\n", + " counter = 0\n", + " target = []\n", + "\n", + " while(data and counter < 1000):\n", + " target.append(format_man_dist(i))\n", + " \n", + " data = f.read(8)\n", + " counter += 1" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "# Parsing and Training the neural network" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setting up the neural network\n", + "\n", + "Before we run our training/parsing, we need to make sure Keras has configured the neural network properly to our specifications." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "# Sets up a Sequential model, Sequential is all\n", @@ -50,31 +201,507 @@ "# Configure the learning process.\n", "\n", "model.compile(optimizer='sgd',\n", - " loss='mean_squared_error',\n", + " loss='mean_absolute_percentage_error',\n", " metrics=['accuracy'])" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training the neural network\n", + "\n", + "Again, this part has the parsing and training in hand, since as we parse and format the states from our file input, we feed those batches to our neural network to train with." 
+ ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/5\n", + "1/1 [==============================] - 0s 490ms/step - loss: 500344896.0000 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 1.2887 - acc: 1.0000\n", + "Epoch 3/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 0.6327 - acc: 1.0000\n", + "Epoch 4/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 0.3702 - acc: 1.0000\n", + "Epoch 5/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 0.2573 - acc: 1.0000\n", + "Epoch 1/5\n", + "2/2 [==============================] - 0s 1ms/step - loss: 32517106.0000 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "2/2 [==============================] - 0s 721us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "2/2 [==============================] - 0s 738us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "2/2 [==============================] - 0s 783us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "2/2 [==============================] - 0s 1ms/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "4/4 [==============================] - 0s 547us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "4/4 [==============================] - 0s 550us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "4/4 [==============================] - 0s 480us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "4/4 [==============================] - 0s 451us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "4/4 [==============================] - 0s 367us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "10/10 [==============================] - 0s 155us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "10/10 [==============================] - 0s 197us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "10/10 [==============================] - 0s 157us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "10/10 [==============================] - 0s 167us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "10/10 [==============================] - 0s 162us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "24/24 [==============================] - 0s 73us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "24/24 [==============================] - 0s 85us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "24/24 [==============================] - 0s 88us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "24/24 [==============================] - 0s 85us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "24/24 [==============================] - 0s 85us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "54/54 [==============================] - 0s 48us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "54/54 [==============================] - 0s 67us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "54/54 [==============================] - 0s 47us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "54/54 [==============================] - 0s 46us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "54/54 [==============================] - 0s 37us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "107/107 [==============================] - 0s 
37us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "107/107 [==============================] - 0s 41us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "107/107 [==============================] - 0s 60us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "107/107 [==============================] - 0s 37us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "107/107 [==============================] - 0s 55us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "222/222 [==============================] - 0s 26us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "222/222 [==============================] - 0s 25us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "222/222 [==============================] - 0s 29us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "222/222 [==============================] - 0s 34us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "222/222 [==============================] - 0s 26us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "496/496 [==============================] - 0s 26us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "496/496 [==============================] - 0s 24us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "496/496 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "496/496 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "496/496 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 13us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 13us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 
3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 14us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + 
"Epoch 3/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 25us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 14us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 
0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 14us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 14us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 1.0000\n" + ] + } + ], + "source": [ + "for i in range(29):\n", + " filename = join('data', str(i) + '.bin')\n", + " \n", + " # Debugging to print the current file from which states are being parsed.\n", + " #print(i)\n", + " \n", + " with open(filename, 'rb') as f:\n", + " data = f.read(8)\n", + " counter = 0\n", + " \n", + " temp = []\n", + " target = []\n", + "\n", + " while(data and counter < 1000):\n", + " bin_data = reduce(format_input, list(data), [])\n", + " bin_data.reverse()\n", + " bin_data = bin_data[16:]\n", + "\n", + " pos_data = reduce(format_pos, enumerate(list(data)), [])\n", + " pos_data.reverse()\n", + "\n", + " state_pos = []\n", + "\n", + " for p in pos_data:\n", + " state_pos.append(p[1])\n", + "\n", + " target_pos = reduce(generate_pos, pos_data, [])\n", + "\n", + " #for i in range(int(len(bin_data)/256)):\n", + " # for x in range((i*16) + 1,(i*16) + 16):\n", + " # temp = []\n", + " # for y in range(16):\n", + " # temp.append(bin_data[(x*16) + y])\n", + " # print(temp)\n", + "\n", + " 
target.append(format_man_dist(i))\n", + "\n", + " temp.append(bin_data)\n", + "\n", + " data = f.read(8)\n", + " counter += 1\n", + "\n", + " # Train the network.\n", + "\n", + " model.fit(np.array(temp), np.array(target), epochs=5, batch_size=100)\n", + " #model.train_on_batch(np.array(temp), np.array(target))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Testing the neural network" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "## Creating the testing target data\n", + "\n", + "Similarly, here is a separate cell that just parses and creates the necessary target and testing arrays that we will need to test the neural network using Keras." + ] + }, + { + "cell_type": "code", + "execution_count": 21, "metadata": {}, "outputs": [], + "source": [ + "# Used for testing data\n", + "\n", + "with open('data/18.bin', 'rb') as f:\n", + " \n", + " for i in range(1000):\n", + " data = f.read(8)\n", + " \n", + " data = f.read(8)\n", + " \n", + " counter = 0\n", + " \n", + " temp = []\n", + " \n", + " target = []\n", + " \n", + " while(data):\n", + " bin_data = reduce(format_input, list(data), [])\n", + " bin_data.reverse()\n", + " bin_data = bin_data[16:]\n", + "\n", + " pos_data = reduce(format_pos, enumerate(list(data)), [])\n", + " pos_data.reverse()\n", + " pos_data = pos_data[16:]\n", + "\n", + " target_pos = reduce(generate_pos, pos_data, [])\n", + "\n", + " target.append(format_man_dist(target_pos))\n", + "\n", + " temp.append(bin_data)\n", + " \n", + " counter += 1\n", + " data = f.read(8)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating our test data" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "446342/446342 [==============================] - 1s 2us/step\n" + ] + } + ], + "source": [ + "# Evaluate accuracy\n", + " \n", + "loss_and_metrics = model.evaluate(np.array(temp),np.array(target), batch_size=1000)\n", + " \n", + "# Generating predictions:\n", + " \n", + "predictions = model.predict(np.array(temp), batch_size=1000)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.0\n", + "[0.0, 1.0]\n", + "['loss', 'acc']\n" + ] + } + ], + "source": [ + "output = []\n", + "\n", + "for p in predictions:\n", + " output.append(np.argmax(p))\n", + " \n", + "print(np.array(output).mean())\n", + "\n", + "print(loss_and_metrics)\n", + "\n", + "print(model.metrics_names)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n" + ] + } + ], "source": [ "\n", - "def format_input(acc, elem):\n", - " hex_elem = (elem - (elem >> 4 << 4))\n", - " for x in range(16):\n", - " if x == hex_elem:\n", - " acc.append(1)\n", - " else:\n", - " acc.append(0)\n", - " hex_elem = (elem >> 4) % 16\n", - " for x in range(16):\n", - " if x == hex_elem:\n", - " acc.append(1)\n", - " else:\n", - " acc.append(0)\n", - " return acc\n", "\n", "with open('data/0.bin', 'rb') as f:\n", " data = f.read(8)\n", @@ -84,7 +711,6 @@ " bin_data = reduce(format_input, list(data), [])\n", " bin_data.reverse()\n", " bin_data = bin_data[16:]\n", - " print(bin_data)\n", "\n", " print(counter)\n", " \n", @@ -93,7 +719,6 @@ " temp = []\n", " for y in range(16):\n", " temp.append(bin_data[(x*16) + 
y])\n", - " print(temp)\n", " \n", " data = f.read(8)\n", " counter += 1" @@ -113,50 +738,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n" + ] + } + ], "source": [ - "def man_dist(x, y):\n", - " for a, b in zip(x, y):\n", - " a_one, a_two = x\n", - " b_one, b_two = y\n", - " \n", - " return (abs(a_one - b_one) + abs(a_two - b_two))\n", - " \n", - "def man_dist_state(x, y):\n", - " return sum(man_dist(a, b) for a, b in zip(x, y))\n", - "\n", - "def format_pos(acc, elem):\n", - " hex_elem = (elem[1] - (elem[1] >> 4 << 4))\n", - " if hex_elem == 0:\n", - " acc.append((hex_elem, (3,3)))\n", - " else:\n", - " acc.append((hex_elem, ((15 - ((elem[0]) * 2)) % 4,int((15 - ((elem[0]) * 2)) / 4))))\n", - " hex_elem = (elem[1] >> 4) % 16\n", - " if hex_elem == 0:\n", - " acc.append((hex_elem, (3,3)))\n", - " else:\n", - " acc.append((hex_elem, ((15 - ((elem[0]) * 2 + 1)) % 4,int((15 - ((elem[0]) * 2 + 1)) / 4))))\n", - " \n", - " return acc\n", - "\n", - "def generate_pos(acc, elem):\n", - " if(elem[0] == 0):\n", - " acc.append((3,3))\n", - " else:\n", - " acc.append((((elem[0] - 1) % 4), (int((elem[0] - 1)/4))))\n", - " \n", - " return acc\n", - "\n", - "def format_man_dist(elem):\n", - " acc = []\n", - " for x in range(28, -1, -1):\n", - " if x == elem:\n", - " acc.append(1)\n", - " else:\n", - " acc.append(0)\n", - " return acc\n", "\n", "with open('data/0.bin', 'rb') as f:\n", " data = f.read(8)\n", @@ -182,9 +775,341 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "Epoch 1/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1/1 [==============================] - 0s 2ms/step - loss: 3.4483 - acc: 0.0000e+00\n", + "1\n", + "Epoch 1/5\n", + "2/2 [==============================] - 0s 987us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "2/2 [==============================] - 0s 924us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "2/2 [==============================] - 0s 951us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "2/2 [==============================] - 0s 927us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "2/2 [==============================] - 0s 884us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "2\n", + "Epoch 1/5\n", + "4/4 [==============================] - 0s 538us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "4/4 [==============================] - 0s 639us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "4/4 [==============================] - 0s 776us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "4/4 [==============================] - 0s 635us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "4/4 [==============================] - 0s 812us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "3\n", + "Epoch 
1/5\n", + "10/10 [==============================] - 0s 207us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "10/10 [==============================] - 0s 211us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "10/10 [==============================] - 0s 232us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "10/10 [==============================] - 0s 259us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "10/10 [==============================] - 0s 188us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "4\n", + "Epoch 1/5\n", + "24/24 [==============================] - 0s 100us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "24/24 [==============================] - 0s 90us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "24/24 [==============================] - 0s 111us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "24/24 [==============================] - 0s 106us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "24/24 [==============================] - 0s 97us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "5\n", + "Epoch 1/5\n", + "54/54 [==============================] - 0s 44us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "54/54 [==============================] - 0s 52us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "54/54 [==============================] - 0s 33us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "54/54 [==============================] - 0s 41us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "54/54 [==============================] - 0s 50us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "6\n", + "Epoch 1/5\n", + "107/107 [==============================] - 0s 53us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "107/107 [==============================] - 0s 45us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "107/107 [==============================] - 0s 42us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "107/107 [==============================] - 0s 39us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "107/107 [==============================] - 0s 42us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "7\n", + "Epoch 1/5\n", + "222/222 [==============================] - 0s 31us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "222/222 [==============================] - 0s 32us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "222/222 [==============================] - 0s 34us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "222/222 [==============================] - 0s 35us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "222/222 [==============================] - 0s 32us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "8\n", + "Epoch 1/5\n", + "496/496 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "496/496 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "496/496 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "496/496 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "496/496 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "9\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + 
"1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "10\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "11\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "12\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "13\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 24us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "14\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 14us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 14us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "15\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] 
- 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "16\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "17\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "18\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "19\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "20\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "21\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 
[==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "22\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "23\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 15us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 16us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "24\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 23us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "25\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 24us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "26\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 25us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "27\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 
18us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 21us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - ETA: 0s - loss: 3.4483 - acc: 0.0000e+0 - 0s 19us/step - loss: 3.4483 - acc: 0.0000e+00\n", + "28\n", + "Epoch 1/5\n", + "1000/1000 [==============================] - 0s 17us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 2/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 3/5\n", + "1000/1000 [==============================] - 0s 19us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 4/5\n", + "1000/1000 [==============================] - 0s 22us/step - loss: 3.4483 - acc: 1.0000\n", + "Epoch 5/5\n", + "1000/1000 [==============================] - 0s 20us/step - loss: 3.4483 - acc: 1.0000\n" + ] + } + ], "source": [ "for i in range(29):\n", " filename = join('data', str(i) + '.bin')\n", @@ -194,6 +1119,9 @@ " with open(filename, 'rb') as f:\n", " data = f.read(8)\n", " counter = 0\n", + " \n", + " temp = []\n", + " target = []\n", "\n", " while(data and counter < 1000):\n", " bin_data = reduce(format_input, list(data), [])\n", @@ -217,43 +1145,117 @@ " # temp.append(bin_data[(x*16) + y])\n", " # print(temp)\n", "\n", - " target = []\n", - " target.append(format_man_dist(man_dist_state(state_pos, target_pos)))\n", + " target.append(format_man_dist(i))\n", "\n", - " temp = []\n", " temp.append(bin_data)\n", "\n", - " # Train the network.\n", - "\n", - " #model.fit(data, output, epochs=5, batch_size=10)\n", - " model.train_on_batch(np.array(temp), np.array(target))\n", - "\n", " data = f.read(8)\n", - " counter += 1" + " counter += 1\n", + "\n", + " # Train the network.\n", + "\n", + " model.fit(np.array(temp), np.array(target), epochs=5, batch_size=100)\n", + " #model.train_on_batch(np.array(temp), np.array(target))" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, + "execution_count": 16, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ - "with open('data/2.bin', 'rb') as f:\n", + "# Used for testing data\n", + "\n", + "with open('data/18.bin', 'rb') as f:\n", + " \n", + " for i in range(300):\n", + " data = f.read(8)\n", + " \n", " data = f.read(8)\n", + " \n", " counter = 0\n", + " \n", + " temp = []\n", + " \n", + " target = []\n", " \n", " while(data):\n", " bin_data = reduce(format_input, list(data), [])\n", " bin_data.reverse()\n", " bin_data = bin_data[16:]\n", - " \n", - " temp = []\n", + "\n", + " pos_data = reduce(format_pos, enumerate(list(data)), [])\n", + " pos_data.reverse()\n", + " pos_data = pos_data[16:]\n", + "\n", + " target_pos = reduce(generate_pos, pos_data, [])\n", + "\n", + " #for i in range(int(len(bin_data)/256)):\n", + " # for x in range((i*16) + 1,(i*16) + 16):\n", + " # temp = []\n", + " # for y in range(16):\n", + " # temp.append(bin_data[(x*16) + y])\n", + " # print(temp)\n", + "\n", + " target.append(format_man_dist(target_pos))\n", + "\n", " temp.append(bin_data)\n", + " \n", + " counter += 1\n", + " data = f.read(8)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "447042/447042 [==============================] - 1s 2us/step\n" + ] + } + ], + "source": [ + "# Evaluate accuracy\n", + " \n", + "loss_and_metrics = 
model.evaluate(np.array(temp),np.array(target), batch_size=1000)\n", + " \n", + "# Generating predictions:\n", + " \n", + "predictions = model.predict(np.array(temp), batch_size=1000)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.0\n", + "[0.0, 1.0]\n", + "['loss', 'acc']\n" + ] + } + ], + "source": [ + "output = []\n", "\n", - " # Generating predictions:\n", + "for p in predictions:\n", + " output.append(np.argmax(p))\n", + " \n", + "print(np.array(output).mean())\n", "\n", - " predictions = model.predict(np.array(temp), batch_size=1)\n", - " print(predictions)" + "print(loss_and_metrics)\n", + "\n", + "print(model.metrics_names)" ] }, {
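One note for reading the prediction cells above: because `format_man_dist` places its single 1 at index `28 - distance`, the raw `np.argmax` values being averaged correspond to `28 - predicted_distance`. A small hedged sketch of decoding the predictions back into Manhattan-distance estimates, assuming `predictions` is the array produced by `model.predict` above:

```python
import numpy as np

# Sketch only: decode each 29-way prediction back to a Manhattan-distance estimate.
# format_man_dist puts the 1 at index 28 - distance, so invert that mapping here.
predicted_dist = [28 - int(np.argmax(p)) for p in predictions]

print(np.mean(predicted_dist))  # average predicted distance over the test states
print(predicted_dist[:10])      # first few per-state estimates
```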