Should be feature complete. Onto debugging and extracting data.

This commit is contained in:
Alex Huddleston 2017-12-02 17:08:16 +00:00
parent 2ba5366c4c
commit 5e2d49fa69


@@ -2,51 +2,23 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"outputs": [],
"source": [
"# Setting up our imported libraries.\n",
"from functools import reduce\n",
"import numpy as np\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense"
"from keras.layers import Dense\n",
"from os.path import join"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"240000\n",
"240\n",
"[[ 0.28588903 0.05308564 0.99171479 ..., 0.92657084 0.09114427\n",
" 0.76495161]\n",
" [ 0.50998915 0.74032164 0.04898317 ..., 0.77742777 0.46720853\n",
" 0.01731216]\n",
" [ 0.31522802 0.11448062 0.40291163 ..., 0.87519373 0.31255597\n",
" 0.7202333 ]\n",
" ..., \n",
" [ 0.13906598 0.99536312 0.36709839 ..., 0.68740262 0.9536678\n",
" 0.53053495]\n",
" [ 0.13696298 0.91392043 0.5846018 ..., 0.84365665 0.92837426\n",
" 0.18738981]\n",
" [ 0.05775272 0.7919279 0.51444914 ..., 0.53078037 0.67684536\n",
" 0.25327729]]\n"
]
}
],
"outputs": [],
"source": [
"# Generating dummy data.\n",
"\n",
@@ -58,36 +30,9 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/5\n",
"1000/1000 [==============================] - 4s 4ms/step - loss: 0.0986 - acc: 0.0250\n",
"Epoch 2/5\n",
"1000/1000 [==============================] - 0s 155us/step - loss: 0.0936 - acc: 0.0350\n",
"Epoch 3/5\n",
"1000/1000 [==============================] - 0s 157us/step - loss: 0.0912 - acc: 0.0380\n",
"Epoch 4/5\n",
"1000/1000 [==============================] - 0s 154us/step - loss: 0.0901 - acc: 0.0370\n",
"Epoch 5/5\n",
"1000/1000 [==============================] - 0s 162us/step - loss: 0.0894 - acc: 0.0370\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x7fe4fae027f0>"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"# Sets up a Sequential model, Sequential is all\n",
"# that should need to be used for this project,\n",
@@ -106,35 +51,209 @@
"\n",
"model.compile(optimizer='sgd',\n",
" loss='mean_squared_error',\n",
" metrics=['accuracy'])\n",
"\n",
"# Train the network.\n",
"\n",
"model.fit(data, output, epochs=5, batch_size=10)\n",
"\n",
"# Generating predictions should look like this:\n",
"\n",
"# predictions = model.predict(testing_data, batch_size=10)"
" metrics=['accuracy'])"
]
},
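The layers of the Sequential model sit outside this hunk, so as a point of reference only, a minimal Keras model consistent with the shapes used further down (240 one-hot input bits per state, 29 possible Manhattan distances) might look like the sketch below. The hidden-layer count and width are assumptions, not the layers this notebook actually defines.

# Hedged sketch: only the 240-in / 29-out shape comes from the cells below; the hidden layer is assumed.
from keras.models import Sequential
from keras.layers import Dense

sketch_model = Sequential()
sketch_model.add(Dense(64, activation='relu', input_dim=240))  # 240 bits per encoded state
sketch_model.add(Dense(29, activation='softmax'))              # 29 slots, matching format_man_dist
sketch_model.compile(optimizer='sgd',
                     loss='mean_squared_error',
                     metrics=['accuracy'])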
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0xf23456789abcdef0\n"
]
}
],
"outputs": [],
"source": [
"with open('data/0.bin', 'rb') as f:\n",
" data = f.read()\n",
"\n",
"print(hex(int.from_bytes(data, byteorder='little', signed=False)))"
"def format_input(acc, elem):\n",
" hex_elem = (elem - (elem >> 4 << 4))\n",
" for x in range(16):\n",
" if x == hex_elem:\n",
" acc.append(1)\n",
" else:\n",
" acc.append(0)\n",
" hex_elem = (elem >> 4) % 16\n",
" for x in range(16):\n",
" if x == hex_elem:\n",
" acc.append(1)\n",
" else:\n",
" acc.append(0)\n",
" return acc\n",
"\n",
"with open('data/0.bin', 'rb') as f:\n",
" data = f.read(8)\n",
" counter = 0\n",
"\n",
" while(data):\n",
" bin_data = reduce(format_input, list(data), [])\n",
" bin_data.reverse()\n",
" bin_data = bin_data[16:]\n",
" print(bin_data)\n",
"\n",
" print(counter)\n",
" \n",
" for i in range(int(len(bin_data)/240)):\n",
" for x in range((i*16),(i*16) + 15):\n",
" temp = []\n",
" for y in range(16):\n",
" temp.append(bin_data[(x*16) + y])\n",
" print(temp)\n",
" \n",
" data = f.read(8)\n",
" counter += 1"
]
},
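For reference, format_input splits each byte into its low and high nibbles and one-hot encodes each over 16 positions, so an 8-byte record expands to 256 bits before the reverse and the [16:] slice trim it to 240. A quick standalone check with an arbitrary example byte (0x3A), assuming format_input and the reduce import from the first cell are in scope:

# 0x3A is an arbitrary example byte, not taken from the data files.
bits = reduce(format_input, [0x3A], [])
print(len(bits))           # 32: two 16-wide one-hot groups per byte
print(bits.index(1))       # 10: low nibble is 0xA
print(bits[16:].index(1))  # 3:  high nibble is 0x3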
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"I'm going to need to calculate Manhattan Distances for each of the states at some point.\n",
"\n",
"This website might be helpful for that formula:\n",
"https://heuristicswiki.wikispaces.com/Manhattan+Distance"
]
},
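For two grid positions p = (x1, y1) and q = (x2, y2), the Manhattan distance is |x1 - x2| + |y1 - y2|, and for a whole puzzle state it is that quantity summed over every tile's current and goal squares. A tiny worked example with made-up positions:

# A tile at column 0, row 2 whose goal square is column 3, row 1 is 3 + 1 = 4 moves away.
current = (0, 2)
goal = (3, 1)
print(abs(current[0] - goal[0]) + abs(current[1] - goal[1]))  # 4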
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def man_dist(x, y):\n",
" for a, b in zip(x, y):\n",
" a_one, a_two = x\n",
" b_one, b_two = y\n",
" \n",
" return (abs(a_one - b_one) + abs(a_two - b_two))\n",
" \n",
"def man_dist_state(x, y):\n",
" return sum(man_dist(a, b) for a, b in zip(x, y))\n",
"\n",
"def format_pos(acc, elem):\n",
" hex_elem = (elem[1] - (elem[1] >> 4 << 4))\n",
" if hex_elem == 0:\n",
" acc.append((hex_elem, (3,3)))\n",
" else:\n",
" acc.append((hex_elem, ((15 - ((elem[0]) * 2)) % 4,int((15 - ((elem[0]) * 2)) / 4))))\n",
" hex_elem = (elem[1] >> 4) % 16\n",
" if hex_elem == 0:\n",
" acc.append((hex_elem, (3,3)))\n",
" else:\n",
" acc.append((hex_elem, ((15 - ((elem[0]) * 2 + 1)) % 4,int((15 - ((elem[0]) * 2 + 1)) / 4))))\n",
" \n",
" return acc\n",
"\n",
"def generate_pos(acc, elem):\n",
" if(elem[0] == 0):\n",
" acc.append((3,3))\n",
" else:\n",
" acc.append((((elem[0] - 1) % 4), (int((elem[0] - 1)/4))))\n",
" \n",
" return acc\n",
"\n",
"def format_man_dist(elem):\n",
" acc = []\n",
" for x in range(28, -1, -1):\n",
" if x == elem:\n",
" acc.append(1)\n",
" else:\n",
" acc.append(0)\n",
" return acc\n",
"\n",
"with open('data/0.bin', 'rb') as f:\n",
" data = f.read(8)\n",
" counter = 0\n",
" \n",
" while(data):\n",
" pos_data = reduce(format_pos, enumerate(list(data)), [])\n",
" pos_data.reverse()\n",
" pos_data = pos_data[1:]\n",
" \n",
" state_pos = []\n",
" \n",
" for p in pos_data:\n",
" state_pos.append(p[1])\n",
" \n",
" target_pos = reduce(generate_pos, pos_data, [])\n",
" \n",
" print(format_man_dist(man_dist_state(state_pos, target_pos)))\n",
" \n",
" data = f.read(8)\n",
" counter += 1"
]
},
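A side note on man_dist above: the for a, b in zip(x, y) loop never uses a or b and returns on its first pass, so for the (column, row) tuples it receives it collapses to the plain per-tile formula. An equivalent loop-free form, shown only as a sketch and not a change to what the notebook runs, would be:

def man_dist(x, y):
    # x and y are (column, row) tuples for one tile's current and goal squares.
    a_one, a_two = x
    b_one, b_two = y
    return abs(a_one - b_one) + abs(a_two - b_two)

def man_dist_state(x, y):
    # Total distance for a state: sum the per-tile distances.
    return sum(man_dist(a, b) for a, b in zip(x, y))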
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i in range(29):\n",
" filename = join('data', str(i) + '.bin')\n",
" \n",
" print(i)\n",
" \n",
" with open(filename, 'rb') as f:\n",
" data = f.read(8)\n",
" counter = 0\n",
"\n",
" while(data and counter < 1000):\n",
" bin_data = reduce(format_input, list(data), [])\n",
" bin_data.reverse()\n",
" bin_data = bin_data[16:]\n",
"\n",
" pos_data = reduce(format_pos, enumerate(list(data)), [])\n",
" pos_data.reverse()\n",
"\n",
" state_pos = []\n",
"\n",
" for p in pos_data:\n",
" state_pos.append(p[1])\n",
"\n",
" target_pos = reduce(generate_pos, pos_data, [])\n",
"\n",
" #for i in range(int(len(bin_data)/256)):\n",
" # for x in range((i*16) + 1,(i*16) + 16):\n",
" # temp = []\n",
" # for y in range(16):\n",
" # temp.append(bin_data[(x*16) + y])\n",
" # print(temp)\n",
"\n",
" target = []\n",
" target.append(format_man_dist(man_dist_state(state_pos, target_pos)))\n",
"\n",
" temp = []\n",
" temp.append(bin_data)\n",
"\n",
" # Train the network.\n",
"\n",
" #model.fit(data, output, epochs=5, batch_size=10)\n",
" model.train_on_batch(np.array(temp), np.array(target))\n",
"\n",
" data = f.read(8)\n",
" counter += 1"
]
},
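The loop above feeds model.train_on_batch one encoded state at a time, i.e. a batch of one. If that proves slow, one possible variant is to accumulate a chunk of inputs and one-hot distance targets and hand them to train_on_batch together. A hedged sketch of that idea, reusing the helpers defined earlier (the batch size of 32 and the single example file are assumptions):

# Sketch only: batch size and file choice are assumed; any partial batch left at EOF is dropped here.
inputs, targets = [], []
with open('data/0.bin', 'rb') as f:
    data = f.read(8)
    while data:
        bin_data = reduce(format_input, list(data), [])
        bin_data.reverse()
        inputs.append(bin_data[16:])

        pos_data = reduce(format_pos, enumerate(list(data)), [])
        pos_data.reverse()
        state_pos = [p[1] for p in pos_data]
        target_pos = reduce(generate_pos, pos_data, [])
        targets.append(format_man_dist(man_dist_state(state_pos, target_pos)))

        if len(inputs) == 32:
            model.train_on_batch(np.array(inputs), np.array(targets))
            inputs, targets = [], []
        data = f.read(8)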
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open('data/2.bin', 'rb') as f:\n",
" data = f.read(8)\n",
" counter = 0\n",
" \n",
" while(data):\n",
" bin_data = reduce(format_input, list(data), [])\n",
" bin_data.reverse()\n",
" bin_data = bin_data[16:]\n",
" \n",
" temp = []\n",
" temp.append(bin_data)\n",
"\n",
" # Generating predictions:\n",
"\n",
" predictions = model.predict(np.array(temp), batch_size=1)\n",
" print(predictions)"
]
},
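Each prediction row is 29 values laid out in the same order format_man_dist writes its targets: index 0 corresponds to a distance of 28 and index 28 to a distance of 0. Assuming that encoding, one way to read a single predicted distance back out of the last predictions array is:

# predictions has shape (1, 29); argmax picks the strongest slot,
# and 28 - index undoes format_man_dist's reversed ordering.
predicted_distance = 28 - int(np.argmax(predictions[0]))
print(predicted_distance)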
{