diff --git a/src/processing.py b/src/processing.py
index b39d140..c9f44b5 100644
--- a/src/processing.py
+++ b/src/processing.py
@@ -29,20 +29,22 @@ def get_furthest_element(element, data):
     furthest_index = element_df["distance"].astype(float).idxmax()
     furthest_row = data.iloc[furthest_index]
     furthest_point = get_different_element(original=element, row=furthest_row)
-    furthest_element = {"point": furthest_point, "distance": furthest_row["distance"]}
-    return furthest_element, furthest_index
+    return {"point": furthest_point, "distance": furthest_row["distance"]}
+
+
+def remove_solution_dataset(data, solution):
+    return data.query(f"source != {solution} and destination != {solution}")
 
 
-# FIXME Remove duplicated elements properly
 def greedy_algorithm(n, m, data):
     solutions = DataFrame(columns=["point", "distance"])
     first_solution = get_first_solution(n, data)
     solutions = solutions.append(first_solution, ignore_index=True)
     for _ in range(m):
-        last_solution = solutions["point"].tail(n=1)
-        centroid, index = get_furthest_element(element=int(last_solution), data=data)
+        last_solution = int(solutions["point"].tail(n=1))
+        centroid = get_furthest_element(element=last_solution, data=data)
         solutions = solutions.append(dict(centroid), ignore_index=True)
-        data = data.drop(index)
+        data = remove_solution_dataset(data=data, solution=last_solution)
     return solutions
 
 
@@ -72,9 +74,11 @@ def execute_algorithm(choice, n, m, data):
 
 def show_results(solutions):
     distance_sum = solutions["distance"].sum()
+    duplicates = solutions.duplicated()
     print(solutions)
     print("Total distance: " + str(distance_sum))
-    print(solutions.duplicated())
+    if solutions[duplicates].empty:
+        print("No duplicates found")
 
 
 def usage(argv):
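
A minimal sketch (not part of the patch) of how the new `remove_solution_dataset` helper is expected to behave. The toy edge list below is an assumption for illustration only; the real `data` DataFrame is built elsewhere in `src/processing.py` and is not shown in this diff.

```python
import pandas as pd


def remove_solution_dataset(data, solution):
    # Keep only rows where the solved point appears as neither endpoint.
    return data.query(f"source != {solution} and destination != {solution}")


# Hypothetical toy data with the same columns the patch relies on.
data = pd.DataFrame(
    {
        "source": [1, 1, 2, 3],
        "destination": [2, 3, 3, 4],
        "distance": [5.0, 2.5, 7.1, 1.8],
    }
)

# Dropping point 2 removes every edge that touches it (rows 0 and 2).
print(remove_solution_dataset(data=data, solution=2))
```

Compared with the old `data.drop(index)`, which discarded only the single furthest row, this filters out every row that references the chosen point, which appears to be what resolves the removed `# FIXME Remove duplicated elements properly` and lets `show_results` report "No duplicates found".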