diff --git a/neural_network.py b/neural_network.py
index d045de8..d3c525d 100644
--- a/neural_network.py
+++ b/neural_network.py
@@ -118,8 +118,8 @@ model.add(Dense(units=29, activation='sigmoid'))
 
 # Configure the learning process.
 model.compile(optimizer='sgd',
-        loss='mean_squared_error',
-        metrics=['accuracy'])
+              loss='mean_squared_error',
+              metrics=['accuracy'])
 
 
 for i in range(29):
@@ -158,41 +158,41 @@ for i in range(11, 29):
 
     with open('/pub/faculty_share/daugher/datafiles/data/18states.bin', 'rb') as f:
-    for i in range(2000):
-        data = f.read(8)
+        for i in range(2000):
+            data = f.read(8)
 
-    data = f.read(8)
+        data = f.read(8)
 
-    counter = 0
+        counter = 0
 
-    testing = []
+        testing = []
 
-    testing_target = []
+        testing_target = []
 
-    while(data):
-        bin_data = reduce(format_input, list(data), [])
-        bin_data.reverse()
-        bin_data = bin_data[16:]
+        while(data):
+            bin_data = reduce(format_input, list(data), [])
+            bin_data.reverse()
+            bin_data = bin_data[16:]
 
-        testing.append(bin_data)
+            testing.append(bin_data)
 
-        pos_data = reduce(format_pos, enumerate(list(data)), [])
-        pos_data.reverse()
-        pos_data = pos_data[1:]
+            pos_data = reduce(format_pos, enumerate(list(data)), [])
+            pos_data.reverse()
+            pos_data = pos_data[1:]
 
-        state_pos = []
+            state_pos = []
 
-        for p in pos_data:
-            state_pos.append(p[1])
+            for p in pos_data:
+                state_pos.append(p[1])
 
-        testing_target_pos = reduce(generate_pos, pos_data, [])
+            testing_target_pos = reduce(generate_pos, pos_data, [])
 
-        testing_target.append(format_man_dist(man_dist_state(state_pos, testing_target_pos)))
+            testing_target.append(format_man_dist(man_dist_state(state_pos, testing_target_pos)))
+
+            counter += 1
+            data = f.read(8)
 
-        counter += 1
-        data = f.read(8)
-
     # Evaluate accuracy
     loss_and_metrics = model.evaluate(np.array(testing),np.array(testing_target), batch_size=1000)
@@ -200,7 +200,7 @@ for i in range(11, 29):
 
     # Generating predictions:
    predictions = model.predict(np.array(testing), batch_size=1000)
-    
+
    output = []
 
    for p in range(len(predictions)):
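The second hunk (@@ -158,41 +158,41 @@) is a pure re-indentation: it moves the record-reading loop inside the `with open(...)` block so the file is still open while it is read. The snippet below is a minimal, self-contained sketch of that read pattern (skip-loop, priming read, then a while loop that advances 8 bytes per record). It is not the project's code: it uses an in-memory BytesIO stand-in instead of 18states.bin and omits the helper functions defined elsewhere in neural_network.py (format_input, format_pos, man_dist_state, format_man_dist, generate_pos), so each record is kept as a plain list of byte values.

from io import BytesIO

# Stand-in for '/pub/faculty_share/daugher/datafiles/data/18states.bin':
# four 8-byte records of dummy bytes.
fake_file = BytesIO(bytes(range(32)))

testing = []
counter = 0

with fake_file as f:
    # The patched script first skips a fixed number of 8-byte records
    # (range(2000) in the diff), then does a priming read before the loop.
    for i in range(2):
        data = f.read(8)

    data = f.read(8)            # priming read
    while data:
        # The real code expands each record with reduce(format_input, ...)
        # and builds a Manhattan-distance target; here the raw bytes are kept.
        testing.append(list(data))
        counter += 1
        data = f.read(8)        # advance to the next 8-byte record

print(counter, testing)         # 2 records read after skipping the first 2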