{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
"# If you're really new to python this code might be\n",
|
|
|
|
"# a bit unreadable, but I tried to make it as simple\n",
|
|
|
|
"# as possible.\n",
|
|
|
|
"\n",
|
|
|
|
"# Setting up our imported libraries.\n",
|
|
|
|
"import numpy as np\n",
|
|
|
|
"from keras.models import Sequential\n",
|
|
|
|
"from keras.layers import Dense\n",
|
|
|
|
"\n",
|
|
|
|
"# We're going to use numpy for some easy\n",
|
|
|
|
"# array functionality with keras, and keras\n",
|
|
|
|
"# is our library for handling most of our neural network\n",
|
|
|
|
"# stuff. That being said, you need to have some sort of\n",
|
|
|
|
"# backend for keras to work with, such as the recommended\n",
|
|
|
|
"# TensorFlow, which I'm using here.\n",
|
|
|
|
"# Since keras uses those as a backend, you don't inherently\n",
|
|
|
|
"# need to import it."
|
|
|
|
]
|
|
|
|
},
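  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# (Added sanity check, not in the original plan:) Keras\n",
    "# can report which backend it picked up, which is a\n",
    "# quick way to confirm TensorFlow is actually in use.\n",
    "from keras import backend as K\n",
    "print(K.backend())  # expected to print 'tensorflow' here"
   ]
  },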
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
"# Generating dummy data so I can understand how to input.\n",
|
|
|
|
"data = np.random.random((1000,240))\n",
|
|
|
|
"output = np.random.random((1000, 29))\n",
|
|
|
|
"\n",
|
|
|
|
"# Here's some printouts to see exactly what I'm doing here.\n",
|
|
|
|
"print(data.size)\n",
|
|
|
|
"print(data[0].size)\n",
|
|
|
|
"print(data)"
|
|
|
|
]
|
|
|
|
},
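  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# (Added sketch:) .size above is the total element count;\n",
    "# .shape is usually more informative, since it shows the\n",
    "# (samples, features) layout that Keras expects.\n",
    "print(data.shape)    # (1000, 240): 1000 samples, 240 features\n",
    "print(output.shape)  # (1000, 29)\n",
    "# input_dim=240 in the first Dense layer below matches\n",
    "# data.shape[1]."
   ]
  },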
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
"# Sets up a Sequential model, Sequential is all\n",
|
|
|
|
"# that should need to be used for this project,\n",
|
|
|
|
"# considering that it will only be dealing with\n",
|
|
|
|
"# a linear stack of layers of neurons.\n",
|
|
|
|
"model = Sequential()\n",
|
|
|
|
"\n",
|
|
|
|
"# Adding layers to the model.\n",
|
|
|
|
"\n",
|
|
|
|
"# Dense is the type of layer I think, don't need\n",
|
|
|
|
"# to look into this more since this is all I should\n",
|
|
|
|
"# need to use.\n",
|
|
|
|
"\n",
|
|
|
|
"# units = the number of neurons for this layer.\n",
|
|
|
|
"\n",
|
|
|
|
"# activation = the activation function for this layer.\n",
|
|
|
|
"# our project doc says to use hyperbolic tangent, so\n",
|
|
|
|
"# I set this to tanh. Except for the output layer,\n",
|
|
|
|
"# which I set to sigmoid.\n",
|
|
|
|
"\n",
|
|
|
|
"# input_dim = the dimension of the input list,\n",
|
|
|
|
"# should only be set for the input layer.\n",
|
|
|
|
"\n",
|
|
|
|
"model.add(Dense(units=240, activation='tanh', input_dim=240))\n",
|
|
|
|
"model.add(Dense(units=120, activation='tanh'))\n",
|
|
|
|
"model.add(Dense(units=29, activation='sigmoid'))\n",
    "\n",
    "# Configure the learning process.\n",
    "\n",
    "# optimizer = I'm just using stochastic gradient\n",
    "# descent (SGD) for this. Remember that SGD estimates\n",
    "# the gradient from a small sample of the data at each\n",
    "# step instead of from the whole dataset at once, so\n",
    "# each update is much cheaper to compute.\n",
    "\n",
    "# loss = the loss function. Currently I'm using mean\n",
    "# squared error, but I might change to\n",
    "# mean_absolute_percentage_error, since I think we're\n",
    "# supposed to calculate cost based on how far off we\n",
    "# are, percentage-wise, from the correct number of\n",
    "# moves away from the solved state.\n",
    "\n",
    "# metrics = evaluation metrics. I think all I care\n",
    "# about is accuracy here, if anything, though accuracy\n",
    "# is really a classification metric, so it may not\n",
    "# mean much for numeric targets like these.\n",
    "\n",
    "model.compile(optimizer='sgd',\n",
    "              loss='mean_squared_error',\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "# This is where we configure how we train the network:\n",
    "\n",
    "# data = the input sets of training data for this\n",
    "# network; I'm still unsure what exactly ours will be.\n",
    "\n",
    "# output = the target data for the network. I believe\n",
    "# this should be a set of the same size as the training\n",
    "# data, containing the number of steps until being\n",
    "# solved for each state... I think.\n",
    "\n",
    "# epochs = the number of complete passes over the\n",
    "# training data.\n",
    "\n",
    "# batch_size = the number of samples used for each\n",
    "# gradient update.\n",
    "\n",
    "model.fit(data, output, epochs=5, batch_size=10)\n",
    "\n",
    "# Generating predictions should look like this:\n",
    "\n",
    "# predictions = model.predict(testing_data, batch_size=10)\n",
    "\n",
    "# I've commented it out since I don't have any real\n",
    "# data to predict yet."
   ]
  },
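  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# (Hypothetical sketch:) once real held-out data exists,\n",
    "# evaluation would look something like this. test_data\n",
    "# and test_output are placeholders, not defined yet,\n",
    "# so this stays commented out like the predict example.\n",
    "\n",
    "# loss_and_metrics = model.evaluate(test_data, test_output,\n",
    "#                                   batch_size=10)\n",
    "# print(loss_and_metrics)  # [loss, accuracy]"
   ]
  },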
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}