{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Setting up our imported libraries."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"from functools import reduce\n",
"import numpy as np\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense\n",
"from os.path import join"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Function definitions"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Used to format our input binary state: each byte is split into its low\n",
"# and high hex digits, and each digit is one-hot encoded across 16 slots.\n",
"\n",
"def format_input(acc, elem):\n",
"    hex_elem = (elem - (elem >> 4 << 4))\n",
"    for x in range(16):\n",
"        if x == hex_elem:\n",
"            acc.append(1)\n",
"        else:\n",
"            acc.append(0)\n",
"    hex_elem = (elem >> 4) % 16\n",
"    for x in range(16):\n",
"        if x == hex_elem:\n",
"            acc.append(1)\n",
"        else:\n",
"            acc.append(0)\n",
"    return acc\n",
"\n",
"# Calculate the Manhattan distance between two points.\n",
"\n",
"def man_dist(x, y):\n",
"    a_one, a_two = x\n",
"    b_one, b_two = y\n",
"    return (abs(a_one - b_one) + abs(a_two - b_two))\n",
"\n",
"# Sum the Manhattan distances between corresponding points in two lists.\n",
"\n",
"def man_dist_state(x, y):\n",
"    return sum(man_dist(a, b) for a, b in zip(x, y))\n",
"\n",
"# Used to format the positions we parsed from our binary input: each\n",
"# parsed piece is paired with its (column, row) on the 4x4 board.\n",
"\n",
"def format_pos(acc, elem):\n",
"    hex_elem = (elem[1] - (elem[1] >> 4 << 4))\n",
"    if hex_elem == 0:\n",
"        acc.append((hex_elem, (3,3)))\n",
"    else:\n",
"        acc.append((hex_elem, ((15 - ((elem[0]) * 2)) % 4, int((15 - ((elem[0]) * 2)) / 4))))\n",
"    hex_elem = (elem[1] >> 4) % 16\n",
"    if hex_elem == 0:\n",
"        acc.append((hex_elem, (3,3)))\n",
"    else:\n",
"        acc.append((hex_elem, ((15 - ((elem[0]) * 2 + 1)) % 4, int((15 - ((elem[0]) * 2 + 1)) / 4))))\n",
"\n",
"    return acc\n",
"\n",
"# The name of this function is slightly misleading.\n",
"# I'm simply generating a list of positions that each\n",
"# puzzle piece in the current parsed state SHOULD be at.\n",
"# I organize this in order of the pieces as they were\n",
"# parsed so the two lists line up perfectly.\n",
"\n",
"def generate_pos(acc, elem):\n",
"    if(elem[0] == 0):\n",
"        acc.append((3,3))\n",
"    else:\n",
"        acc.append((((elem[0] - 1) % 4), (int((elem[0] - 1)/4))))\n",
"\n",
"    return acc\n",
"\n",
"# Used to encode the final Manhattan distance as a 29-element one-hot\n",
"# vector that can be compared with our 29 output neurons.\n",
"\n",
"def format_man_dist(elem):\n",
"    acc = []\n",
"    for x in range(28, -1, -1):\n",
"        if x == elem:\n",
"            acc.append(1)\n",
"        else:\n",
"            acc.append(0)\n",
"    return acc\n"
]
},
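{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Sanity-checking the helpers (illustrative)\n",
"\n",
"This cell is not part of the original pipeline; it is a small, assumed example showing what the helpers above produce. `format_input` one-hot encodes the two hex digits of a byte into 32 values (low digit first), and `man_dist_state` sums the Manhattan distances between corresponding positions in two lists. The byte value and positions used here are arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: the byte value and positions are arbitrary examples.\n",
"sample_byte = 0x4A\n",
"\n",
"encoded = reduce(format_input, [sample_byte], [])\n",
"print(len(encoded))           # 32 values: 16 for the low hex digit, 16 for the high digit\n",
"print(encoded.index(1))       # index of the low digit (0xA = 10)\n",
"print(encoded[16:].index(1))  # index of the high digit (0x4 = 4)\n",
"\n",
"# man_dist_state sums Manhattan distances between corresponding positions.\n",
"print(man_dist_state([(0, 0), (3, 3)], [(1, 2), (3, 0)]))  # (1 + 2) + (0 + 3) = 6"
]
},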
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Parsing input"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"## Creating Target data\n",
"\n",
"This cell deserves a short explanation, because the rest of the script works the same way. Rather than parsing all of the binary states from every file at once and holding them in RAM, we parse one binary state at a time (8 bytes, i.e. 64 bits, of which the first 4 bits are ignored), do the calculations and input formatting needed, and append the result for each state to a list to be used later."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"target = []\n",
"\n",
"for i in range(29):\n",
"    filename = join('data', str(i) + '.bin')\n",
"\n",
"    # Debugging to print the current file from which states are being parsed.\n",
"    #print(i)\n",
"    temp = []\n",
"\n",
"    with open(filename, 'rb') as f:\n",
"        data = f.read(8)\n",
"        counter = 0\n",
"\n",
"        # Note: this cap must match the number of states parsed per file in the\n",
"        # training loop below (2000), or model.fit() will see mismatched\n",
"        # input/target sample counts.\n",
"        while(data and counter < 2000):\n",
"            temp.append(format_man_dist(i))\n",
"\n",
"            data = f.read(8)\n",
"            counter += 1\n",
"\n",
"    target.append(temp)\n",
"\n",
"#print(target[28][500])"
]
},
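{
"cell_type": "markdown",
"metadata": {},
"source": [
"### What the target encoding looks like (illustrative)\n",
"\n",
"This cell is not in the original notebook; it just makes the target encoding concrete. `format_man_dist(d)` produces a 29-element one-hot vector with the 1 at index 28 - d, matching the 29 output neurons, and `target` holds one list of these vectors per input file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: inspect the 29-way one-hot target encoding.\n",
"vec = format_man_dist(5)\n",
"print(len(vec))      # 29 outputs, one per possible Manhattan distance 0..28\n",
"print(vec.index(1))  # index 28 - 5 = 23: the vector runs from distance 28 down to 0\n",
"\n",
"# target holds one list of these vectors per file parsed above.\n",
"print(len(target))   # 29"
]
},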
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"# Parsing and Training the neural network"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setting up the neural network\n",
"\n",
"Before we run our parsing and training, we need to make sure Keras has configured the neural network to our specifications."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Set up a Sequential model. Sequential is all we should need\n",
"# for this project, since it only deals with a linear stack\n",
"# of layers of neurons.\n",
"\n",
"model = Sequential()\n",
"\n",
"# Adding layers to the model.\n",
"\n",
"model.add(Dense(units=240, activation='tanh', input_dim=240))\n",
"model.add(Dense(units=120, activation='tanh'))\n",
"model.add(Dense(units=60, activation='tanh'))\n",
"model.add(Dense(units=29, activation='sigmoid'))\n",
"\n",
"# Configure the learning process.\n",
"\n",
"model.compile(optimizer='sgd',\n",
"              loss='mean_squared_error',\n",
"              metrics=['accuracy'])"
]
},
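{
"cell_type": "markdown",
"metadata": {},
"source": [
"An optional check, not part of the original notebook: Keras can print the configured layer shapes and parameter counts, which is a quick way to confirm the stack of Dense layers (240, 120, 60 and 29 units) defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: print layer output shapes and parameter counts.\n",
"model.summary()"
]
},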
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Training the neural network\n",
"\n",
"Again, this part handles parsing and training together: as we parse and format the states from each input file, we feed those batches to the neural network to train on."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"0\n",
|
|
"Epoch 1/8\n",
|
|
"1/1 [==============================] - 0s 474ms/step - loss: 0.2470 - acc: 0.0000e+00\n",
|
|
"Epoch 2/8\n",
|
|
"1/1 [==============================] - 0s 2ms/step - loss: 0.2458 - acc: 0.0000e+00\n",
|
|
"Epoch 3/8\n",
|
|
"1/1 [==============================] - 0s 2ms/step - loss: 0.2446 - acc: 0.0000e+00\n",
|
|
"Epoch 4/8\n",
|
|
"1/1 [==============================] - 0s 1ms/step - loss: 0.2435 - acc: 0.0000e+00\n",
|
|
"Epoch 5/8\n",
|
|
"1/1 [==============================] - 0s 2ms/step - loss: 0.2423 - acc: 0.0000e+00\n",
|
|
"Epoch 6/8\n",
|
|
"1/1 [==============================] - 0s 2ms/step - loss: 0.2411 - acc: 0.0000e+00\n",
|
|
"Epoch 7/8\n",
|
|
"1/1 [==============================] - 0s 2ms/step - loss: 0.2399 - acc: 0.0000e+00\n",
|
|
"Epoch 8/8\n",
|
|
"1/1 [==============================] - 0s 2ms/step - loss: 0.2388 - acc: 0.0000e+00\n",
|
|
"1\n",
|
|
"Epoch 1/8\n",
|
|
"2/2 [==============================] - 0s 827us/step - loss: 0.2530 - acc: 0.0000e+00\n",
|
|
"Epoch 2/8\n",
|
|
"2/2 [==============================] - 0s 1ms/step - loss: 0.2519 - acc: 0.0000e+00\n",
|
|
"Epoch 3/8\n",
|
|
"2/2 [==============================] - 0s 3ms/step - loss: 0.2508 - acc: 0.0000e+00\n",
|
|
"Epoch 4/8\n",
|
|
"2/2 [==============================] - 0s 1ms/step - loss: 0.2497 - acc: 0.0000e+00\n",
|
|
"Epoch 5/8\n",
|
|
"2/2 [==============================] - 0s 782us/step - loss: 0.2487 - acc: 0.0000e+00\n",
|
|
"Epoch 6/8\n",
|
|
"2/2 [==============================] - 0s 1000us/step - loss: 0.2476 - acc: 0.0000e+00\n",
|
|
"Epoch 7/8\n",
|
|
"2/2 [==============================] - 0s 813us/step - loss: 0.2466 - acc: 0.0000e+00\n",
|
|
"Epoch 8/8\n",
|
|
"2/2 [==============================] - 0s 740us/step - loss: 0.2456 - acc: 0.0000e+00\n",
|
|
"2\n",
|
|
"Epoch 1/8\n",
|
|
"4/4 [==============================] - 0s 475us/step - loss: 0.2426 - acc: 0.0000e+00\n",
|
|
"Epoch 2/8\n",
|
|
"4/4 [==============================] - 0s 721us/step - loss: 0.2416 - acc: 0.0000e+00\n",
|
|
"Epoch 3/8\n",
|
|
"4/4 [==============================] - 0s 403us/step - loss: 0.2407 - acc: 0.0000e+00\n",
|
|
"Epoch 4/8\n",
|
|
"4/4 [==============================] - 0s 612us/step - loss: 0.2397 - acc: 0.0000e+00\n",
|
|
"Epoch 5/8\n",
|
|
"4/4 [==============================] - 0s 905us/step - loss: 0.2388 - acc: 0.0000e+00\n",
|
|
"Epoch 6/8\n",
|
|
"4/4 [==============================] - 0s 590us/step - loss: 0.2379 - acc: 0.0000e+00\n",
|
|
"Epoch 7/8\n",
|
|
"4/4 [==============================] - 0s 480us/step - loss: 0.2369 - acc: 0.0000e+00\n",
|
|
"Epoch 8/8\n",
|
|
"4/4 [==============================] - 0s 378us/step - loss: 0.2360 - acc: 0.0000e+00\n",
|
|
"3\n",
|
|
"Epoch 1/8\n",
|
|
"10/10 [==============================] - 0s 194us/step - loss: 0.2350 - acc: 0.0000e+00\n",
|
|
"Epoch 2/8\n",
|
|
"10/10 [==============================] - 0s 203us/step - loss: 0.2342 - acc: 0.0000e+00\n",
|
|
"Epoch 3/8\n",
|
|
"10/10 [==============================] - 0s 280us/step - loss: 0.2335 - acc: 0.0000e+00\n",
|
|
"Epoch 4/8\n",
|
|
"10/10 [==============================] - 0s 241us/step - loss: 0.2327 - acc: 0.0000e+00\n",
|
|
"Epoch 5/8\n",
|
|
"10/10 [==============================] - 0s 202us/step - loss: 0.2319 - acc: 0.0000e+00\n",
|
|
"Epoch 6/8\n",
|
|
"10/10 [==============================] - 0s 185us/step - loss: 0.2312 - acc: 0.0000e+00\n",
|
|
"Epoch 7/8\n",
|
|
"10/10 [==============================] - 0s 200us/step - loss: 0.2304 - acc: 0.0000e+00\n",
|
|
"Epoch 8/8\n",
|
|
"10/10 [==============================] - 0s 254us/step - loss: 0.2297 - acc: 0.0000e+00\n",
|
|
"4\n",
|
|
"Epoch 1/8\n",
|
|
"24/24 [==============================] - 0s 103us/step - loss: 0.2265 - acc: 0.0417\n",
|
|
"Epoch 2/8\n",
|
|
"24/24 [==============================] - 0s 81us/step - loss: 0.2259 - acc: 0.0417\n",
|
|
"Epoch 3/8\n",
|
|
"24/24 [==============================] - 0s 88us/step - loss: 0.2253 - acc: 0.0417\n",
|
|
"Epoch 4/8\n",
|
|
"24/24 [==============================] - 0s 74us/step - loss: 0.2247 - acc: 0.0417\n",
|
|
"Epoch 5/8\n",
|
|
"24/24 [==============================] - 0s 79us/step - loss: 0.2241 - acc: 0.0417\n",
|
|
"Epoch 6/8\n",
|
|
"24/24 [==============================] - 0s 95us/step - loss: 0.2235 - acc: 0.0417\n",
|
|
"Epoch 7/8\n",
|
|
"24/24 [==============================] - 0s 92us/step - loss: 0.2229 - acc: 0.0417\n",
|
|
"Epoch 8/8\n",
|
|
"24/24 [==============================] - 0s 87us/step - loss: 0.2223 - acc: 0.0417\n",
|
|
"5\n",
|
|
"Epoch 1/8\n",
|
|
"54/54 [==============================] - 0s 31us/step - loss: 0.2333 - acc: 0.0000e+00\n",
|
|
"Epoch 2/8\n",
|
|
"54/54 [==============================] - 0s 32us/step - loss: 0.2327 - acc: 0.0000e+00\n",
|
|
"Epoch 3/8\n",
|
|
"54/54 [==============================] - 0s 32us/step - loss: 0.2322 - acc: 0.0000e+00\n",
|
|
"Epoch 4/8\n",
|
|
"54/54 [==============================] - 0s 40us/step - loss: 0.2316 - acc: 0.0000e+00\n",
|
|
"Epoch 5/8\n",
|
|
"54/54 [==============================] - 0s 36us/step - loss: 0.2310 - acc: 0.0000e+00\n",
|
|
"Epoch 6/8\n",
|
|
"54/54 [==============================] - 0s 144us/step - loss: 0.2304 - acc: 0.0000e+00\n",
|
|
"Epoch 7/8\n",
|
|
"54/54 [==============================] - 0s 41us/step - loss: 0.2299 - acc: 0.0000e+00\n",
|
|
"Epoch 8/8\n",
|
|
"54/54 [==============================] - 0s 91us/step - loss: 0.2293 - acc: 0.0000e+00\n",
|
|
"6\n",
|
|
"Epoch 1/8\n",
|
|
"107/107 [==============================] - 0s 16us/step - loss: 0.2261 - acc: 0.0467\n",
|
|
"Epoch 2/8\n",
|
|
"107/107 [==============================] - 0s 15us/step - loss: 0.2255 - acc: 0.0467\n",
|
|
"Epoch 3/8\n",
|
|
"107/107 [==============================] - 0s 18us/step - loss: 0.2250 - acc: 0.0654\n",
|
|
"Epoch 4/8\n",
|
|
"107/107 [==============================] - 0s 25us/step - loss: 0.2245 - acc: 0.0654\n",
|
|
"Epoch 5/8\n",
|
|
"107/107 [==============================] - 0s 29us/step - loss: 0.2240 - acc: 0.0654\n",
|
|
"Epoch 6/8\n",
|
|
"107/107 [==============================] - 0s 23us/step - loss: 0.2235 - acc: 0.0654\n",
|
|
"Epoch 7/8\n",
|
|
"107/107 [==============================] - 0s 19us/step - loss: 0.2230 - acc: 0.0654\n",
|
|
"Epoch 8/8\n",
|
|
"107/107 [==============================] - 0s 16us/step - loss: 0.2225 - acc: 0.0654\n",
|
|
"7\n",
|
|
"Epoch 1/8\n",
|
|
"222/222 [==============================] - 0s 8us/step - loss: 0.2266 - acc: 0.0000e+00\n",
|
|
"Epoch 2/8\n",
|
|
"222/222 [==============================] - 0s 10us/step - loss: 0.2261 - acc: 0.0000e+00\n",
|
|
"Epoch 3/8\n",
|
|
"222/222 [==============================] - 0s 9us/step - loss: 0.2257 - acc: 0.0000e+00\n",
|
|
"Epoch 4/8\n",
|
|
"222/222 [==============================] - 0s 9us/step - loss: 0.2252 - acc: 0.0000e+00\n",
|
|
"Epoch 5/8\n",
|
|
"222/222 [==============================] - 0s 11us/step - loss: 0.2247 - acc: 0.0000e+00\n",
|
|
"Epoch 6/8\n",
|
|
"222/222 [==============================] - 0s 11us/step - loss: 0.2243 - acc: 0.0000e+00\n",
|
|
"Epoch 7/8\n",
|
|
"222/222 [==============================] - 0s 8us/step - loss: 0.2238 - acc: 0.0000e+00\n",
|
|
"Epoch 8/8\n",
|
|
"222/222 [==============================] - 0s 12us/step - loss: 0.2233 - acc: 0.0000e+00\n",
|
|
"8\n",
|
|
"Epoch 1/8\n",
|
|
"496/496 [==============================] - 0s 5us/step - loss: 0.2231 - acc: 0.0786\n",
|
|
"Epoch 2/8\n",
|
|
"496/496 [==============================] - 0s 5us/step - loss: 0.2227 - acc: 0.0806\n",
|
|
"Epoch 3/8\n",
|
|
"496/496 [==============================] - 0s 6us/step - loss: 0.2223 - acc: 0.0827\n",
|
|
"Epoch 4/8\n",
|
|
"496/496 [==============================] - 0s 6us/step - loss: 0.2219 - acc: 0.0867\n",
|
|
"Epoch 5/8\n",
|
|
"496/496 [==============================] - 0s 4us/step - loss: 0.2214 - acc: 0.0887\n",
|
|
"Epoch 6/8\n",
|
|
"496/496 [==============================] - 0s 5us/step - loss: 0.2210 - acc: 0.0887\n",
|
|
"Epoch 7/8\n",
|
|
"496/496 [==============================] - 0s 5us/step - loss: 0.2206 - acc: 0.0887\n",
|
|
"Epoch 8/8\n",
|
|
"496/496 [==============================] - 0s 7us/step - loss: 0.2202 - acc: 0.0907\n",
|
|
"9\n"
|
|
]
|
|
},
|
|
{
|
|
"ename": "ValueError",
|
|
"evalue": "Input arrays should have the same number of samples as target arrays. Found 1151 input samples and 1000 target samples.",
|
|
"output_type": "error",
|
|
"traceback": [
|
|
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
|
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
|
|
"\u001b[0;32m<ipython-input-5-2d935fb7ef0f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m# Train the network.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 25\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m8\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 26\u001b[0m \u001b[0;31m#model.train_on_batch(np.array(temp), np.array(target))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
|
"\u001b[0;32m/usr/lib/python3.6/site-packages/keras/models.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[1;32m 958\u001b[0m \u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 959\u001b[0m \u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 960\u001b[0;31m validation_steps=validation_steps)\n\u001b[0m\u001b[1;32m 961\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 962\u001b[0m def evaluate(self, x, y, batch_size=32, verbose=1,\n",
|
|
"\u001b[0;32m/usr/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[1;32m 1572\u001b[0m \u001b[0mclass_weight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mclass_weight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1573\u001b[0m \u001b[0mcheck_batch_axis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1574\u001b[0;31m batch_size=batch_size)\n\u001b[0m\u001b[1;32m 1575\u001b[0m \u001b[0;31m# Prepare validation data.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1576\u001b[0m \u001b[0mdo_validation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
|
"\u001b[0;32m/usr/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_standardize_user_data\u001b[0;34m(self, x, y, sample_weight, class_weight, check_batch_axis, batch_size)\u001b[0m\n\u001b[1;32m 1417\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mref\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1418\u001b[0m in zip(y, sample_weights, class_weights, self._feed_sample_weight_modes)]\n\u001b[0;32m-> 1419\u001b[0;31m \u001b[0m_check_array_lengths\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msample_weights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1420\u001b[0m _check_loss_and_target_compatibility(y,\n\u001b[1;32m 1421\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_feed_loss_fns\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
|
"\u001b[0;32m/usr/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_check_array_lengths\u001b[0;34m(inputs, targets, weights)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m'the same number of samples as target arrays. '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;34m'Found '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mset_x\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m' input samples '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m 'and ' + str(list(set_y)[0]) + ' target samples.')\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mset_w\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m raise ValueError('All sample_weight arrays should have '\n",
|
|
"\u001b[0;31mValueError\u001b[0m: Input arrays should have the same number of samples as target arrays. Found 1151 input samples and 1000 target samples."
|
|
]
|
|
}
|
|
],
"source": [
"for i in range(29):\n",
"    filename = join('data', str(i) + '.bin')\n",
"\n",
"    # Debugging to print the current file from which states are being parsed.\n",
"    print(i)\n",
"\n",
"    with open(filename, 'rb') as f:\n",
"        data = f.read(8)\n",
"        counter = 0\n",
"        training = []\n",
"\n",
"        while(data and counter < 2000):\n",
"            bin_data = reduce(format_input, list(data), [])\n",
"            bin_data.reverse()\n",
"            bin_data = bin_data[16:]\n",
"\n",
"            training.append(bin_data)\n",
"\n",
"            data = f.read(8)\n",
"            counter += 1\n",
"\n",
"    #print(training[0])\n",
"    # Train the network.\n",
"\n",
"    model.fit(np.array(training), np.array(target[i]), epochs=8, batch_size=2000)\n",
"    #model.train_on_batch(np.array(temp), np.array(target))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Testing the neural network"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"## Creating the testing target data\n",
"\n",
"Similarly, this separate cell parses the input and builds the testing and target arrays we will need to evaluate the neural network with Keras."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"18\n"
]
}
],
"source": [
"# Used for testing data\n",
"\n",
"i = 18\n",
"\n",
"filename = join('data', str(i) + '.bin')\n",
"\n",
"# Debugging to print the current file from which states are being parsed.\n",
"print(i)\n",
"\n",
"with open(filename, 'rb') as f:\n",
"\n",
"    # Skip the first 2000 states, which were already used for training.\n",
"    for _ in range(2000):\n",
"        data = f.read(8)\n",
"\n",
"    data = f.read(8)\n",
"\n",
"    counter = 0\n",
"\n",
"    testing = []\n",
"\n",
"    testing_target = []\n",
"\n",
"    while(data and counter < 10000):\n",
"        bin_data = reduce(format_input, list(data), [])\n",
"        bin_data.reverse()\n",
"        bin_data = bin_data[16:]\n",
"\n",
"        testing.append(bin_data)\n",
"\n",
"        pos_data = reduce(format_pos, enumerate(list(data)), [])\n",
"        pos_data.reverse()\n",
"        pos_data = pos_data[1:]\n",
"\n",
"        state_pos = []\n",
"\n",
"        for p in pos_data:\n",
"            state_pos.append(p[1])\n",
"\n",
"        testing_target_pos = reduce(generate_pos, pos_data, [])\n",
"\n",
"        testing_target.append(format_man_dist(man_dist_state(state_pos, testing_target_pos)))\n",
"\n",
"        counter += 1\n",
"        data = f.read(8)"
]
},
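{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick shape check, not part of the original notebook: every testing row should have 240 input values and every testing_target row 29 target values, matching the network defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: confirm the parsed test data matches the network's input/output sizes.\n",
"print(len(testing), len(testing[0]))                 # number of test states, 240 inputs each\n",
"print(len(testing_target), len(testing_target[0]))   # same number of targets, 29 outputs each"
]
},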
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Evaluating our test data"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10000/10000 [==============================] - 0s 2us/step\n"
]
}
],
"source": [
"# Evaluate accuracy\n",
"\n",
"loss_and_metrics = model.evaluate(np.array(testing), np.array(testing_target), batch_size=1000)\n",
"\n",
"# Generating predictions:\n",
"\n",
"predictions = model.predict(np.array(testing), batch_size=1000)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"52.7983333333\n",
"[0.23647564649581909, 0.039100000075995921]\n",
"['loss', 'acc']\n"
]
}
],
"source": [
"output = []\n",
"\n",
"# format_man_dist places distance d at index 28 - d, so 28 - argmax recovers a distance.\n",
"for p in range(len(predictions)):\n",
"    if np.argmax(testing_target[p]) < 18:\n",
"        output.append(100*((18 - (28 - np.argmax(predictions[p]))) / (18 - np.argmax(testing_target[p]))))\n",
"    else:\n",
"        output.append(0)\n",
"\n",
"#for i in range(len(output)):\n",
"#    print(output[i])\n",
"\n",
"print(np.array(output).mean())\n",
"\n",
"print(loss_and_metrics)\n",
"\n",
"print(model.metrics_names)"
]
},
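{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a final illustration (not part of the original notebook), a single prediction can be turned back into an estimated Manhattan distance by undoing the one-hot encoding: `format_man_dist` places distance d at index 28 - d, so the estimate is 28 minus the argmax of the output vector. The state index below is arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: convert one prediction back into a Manhattan distance estimate.\n",
"idx = 0  # arbitrary test state\n",
"\n",
"predicted_dist = 28 - np.argmax(predictions[idx])\n",
"actual_dist = 28 - np.argmax(testing_target[idx])\n",
"\n",
"print(predicted_dist, actual_dist)"
]
},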
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}