diff --git a/neural_network.py b/neural_network.py
index 7eabbc6..d045de8 100644
--- a/neural_network.py
+++ b/neural_network.py
@@ -81,7 +81,7 @@ def format_man_dist(elem):
 target = []
 
 for i in range(29):
-    filename = join('data', str(i) + '.bin')
+    filename = join('/pub/faculty_share/daugher/datafiles/data/' + str(i) + 'states.bin')
 
     # Debugging to print the current file from which states are being parsed.
     #print(i)
@@ -91,7 +91,7 @@ for i in range(29):
         data = f.read(8)
         counter = 0
 
-        while(data and counter < 1000):
+        while(data and counter < 2000):
            temp.append(format_man_dist(i))
            data = f.read(8)
 
@@ -123,7 +123,7 @@ model.compile(optimizer='sgd',
 
 for i in range(29):
-    filename = join('data', str(i) + '.bin')
+    filename = join('/pub/faculty_share/daugher/datafiles/data/' + str(i) + 'states.bin')
 
     # Debugging to print the current file from which states are being parsed.
     print(i)
 
@@ -133,7 +133,7 @@ for i in range(29):
         counter = 0
         training = []
 
-        while(data and counter < 1000):
+        while(data and counter < 2000):
            bin_data = reduce(format_input, list(data), [])
            bin_data.reverse()
            bin_data = bin_data[16:]
@@ -146,70 +146,74 @@ for i in range(29):
     #print(training[0])
 
     # Train the network.
-    model.fit(np.array(training), np.array(target[i]), epochs=5, batch_size=1000)
+    model.fit(np.array(training), np.array(target[i]), epochs=8, batch_size=2000)
 
 #model.train_on_batch(np.array(temp), np.array(target))
 
 # Used for testing data
-with open('data/18.bin', 'rb') as f:
+for i in range(11, 29):
+    filename = join('/pub/faculty_share/daugher/datafiles/data/', str(i) + '.bin')
 
-    for i in range(1000):
-        data = f.read(8)
+    print(i)
 
-    data = f.read(8)
+    with open('/pub/faculty_share/daugher/datafiles/data/18states.bin', 'rb') as f:
 
-    counter = 0
+        for i in range(2000):
+            data = f.read(8)
 
-    testing = []
+        data = f.read(8)
 
-    testing_target = []
+        counter = 0
 
-    while(data):
-        bin_data = reduce(format_input, list(data), [])
-        bin_data.reverse()
-        bin_data = bin_data[16:]
+        testing = []
 
-        testing.append(bin_data)
+        testing_target = []
 
-        pos_data = reduce(format_pos, enumerate(list(data)), [])
-        pos_data.reverse()
-        pos_data = pos_data[1:]
+        while(data):
+            bin_data = reduce(format_input, list(data), [])
+            bin_data.reverse()
+            bin_data = bin_data[16:]
 
-        state_pos = []
+            testing.append(bin_data)
 
-        for p in pos_data:
-            state_pos.append(p[1])
+            pos_data = reduce(format_pos, enumerate(list(data)), [])
+            pos_data.reverse()
+            pos_data = pos_data[1:]
 
-        testing_target_pos = reduce(generate_pos, pos_data, [])
+            state_pos = []
 
-        testing_target.append(format_man_dist(man_dist_state(state_pos, testing_target_pos)))
+            for p in pos_data:
+                state_pos.append(p[1])
 
-        counter += 1
-        data = f.read(8)
+            testing_target_pos = reduce(generate_pos, pos_data, [])
 
-#print(testing_target)
+            testing_target.append(format_man_dist(man_dist_state(state_pos, testing_target_pos)))
 
-# Evaluate accuracy
+            counter += 1
+            data = f.read(8)
 
-loss_and_metrics = model.evaluate(np.array(testing),np.array(testing_target), batch_size=1000)
+
+        # Evaluate accuracy
 
-# Generating predictions:
+        loss_and_metrics = model.evaluate(np.array(testing),np.array(testing_target), batch_size=1000)
 
-predictions = model.predict(np.array(testing), batch_size=1000)
+        # Generating predictions:
 
-output = []
+        predictions = model.predict(np.array(testing), batch_size=1000)
+
+        output = []
 
-for p in range(len(predictions)):
-    if np.argmax(testing_target[p]) < 18:
-        output.append(100*((18 - (28 - np.argmax(predictions[p]))) / (18 - np.argmax(testing_target[p]))))
-    else:
-        output.append(0)
+        for p in range(len(predictions)):
+            if np.argmax(testing_target[p]) < 18:
+                output.append(100*((18 - (28 - np.argmax(predictions[p]))) / (18 - np.argmax(testing_target[p]))))
+            else:
+                output.append(0)
 
-#for i in range(len(output)):
-#    print(output[i])
+        #for i in range(len(output)):
+        #    print(output[i])
 
-print(np.array(output).mean())
+        print("Percentage possible improvement: ", np.array(output).mean())
 
-print(loss_and_metrics)
+        print(model.metrics_names[0], loss_and_metrics[0])
 
-print(model.metrics_names)
+        print(model.metrics_names[1], loss_and_metrics[1])
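
Both the training loop and the reworked testing loop stream fixed-width 8-byte records out of the *states.bin files, capped by a counter that this patch raises from 1000 to 2000 records per file. The sketch below shows that access pattern in isolation; it is an illustration only, and RECORD_SIZE, decode_record, and stream_states are hypothetical names standing in for the decoding that format_input/format_pos actually perform earlier in neural_network.py.

RECORD_SIZE = 8  # each stored puzzle state occupies one 8-byte record

def decode_record(record):
    # Illustrative decoding only: split each byte into two 4-bit values.
    # The real bit layout is whatever format_input/format_pos expect.
    values = []
    for byte in record:
        values.append(byte >> 4)
        values.append(byte & 0x0F)
    return values

def stream_states(path, limit=2000):
    # Yield up to `limit` decoded records, mirroring the while-loops above:
    # read a record, test for EOF and the counter cap, then read again.
    with open(path, 'rb') as f:
        record = f.read(RECORD_SIZE)
        count = 0
        while record and count < limit:
            yield decode_record(record)
            count += 1
            record = f.read(RECORD_SIZE)

Reading one fixed-width chunk per iteration keeps memory usage flat regardless of file size, which is why the loops re-issue f.read(8) at the bottom of each pass instead of loading a whole file at once.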
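
The "Percentage possible improvement" number can be traced directly from the formula in the patch: the prediction contributes 18 - (28 - argmax(predictions[p])), and the result is normalized by 18 - argmax(testing_target[p]). A worked trace with hypothetical one-hot rows over the 29 distance classes (the asymmetry between 28 - argmax for predictions and a bare argmax for targets follows the code as written):

import numpy as np

# Hypothetical vectors, purely to trace the arithmetic in the patch.
testing_target_p = np.zeros(29); testing_target_p[14] = 1.0  # true class 14
predictions_p = np.zeros(29);    predictions_p[12] = 0.9     # predicted class 12

predicted_term = 18 - (28 - np.argmax(predictions_p))  # 18 - 16 = 2
target_term = 18 - np.argmax(testing_target_p)         # 18 - 14 = 4

print(100 * (predicted_term / target_term))            # 50.0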