Adapt local search algorithm to data structure
parent d82fe81f78
commit acb9b35c7a
@@ -1,4 +1,4 @@
-from numpy.random import choice, seed
+from numpy.random import choice, seed, randint
 from pandas import DataFrame
@@ -32,42 +32,37 @@ def get_first_random_solution(n, m, data):
     return solution


-def evaluate_element_swap(solution, old_element, new_element, data):
-    pass
-
-
 def element_in_dataframe(solution, element):
-    duplicates = solution.query(
-        f"(source == {element.source} and destination == {element.destination}) or \
-        (source == {element.destination} and destination == {element.source})"
-    )
+    duplicates = solution.query(f"point == {element}")
     return not duplicates.empty


-def replace_worst_element(previous, data):
+def replace_worst_element(previous, n, data):
     solution = previous.copy()
     worst_index = solution["distance"].astype(float).idxmin()
-    random_element = data.sample().squeeze()
+    random_element = randint(n)
     while element_in_dataframe(solution=solution, element=random_element):
-        random_element = data.sample().squeeze()
-    solution.loc[worst_index] = random_element
+        random_element = randint(n)
+    solution["point"].loc[worst_index] = random_element
+    solution["distance"].loc[worst_index] = compute_distance(
+        element=solution["point"].loc[worst_index], solution=solution, data=data
+    )
     return solution


-def get_random_solution(previous, data):
-    solution, worst_index = replace_worst_element(previous, data)
-    previous_worst_distance = previous["distance"].loc[worst_index]
-    while solution.distance.loc[worst_index] <= previous_worst_distance:
-        solution = replace_worst_element(previous=solution, data=data)
+def get_random_solution(previous, n, data):
+    solution = replace_worst_element(previous, n, data)
+    while solution["distance"].sum() <= previous["distance"].sum():
+        solution = replace_worst_element(previous=solution, n=n, data=data)
     return solution


-def explore_neighbourhood(element, data, max_iterations=100000):
+def explore_neighbourhood(element, n, data, max_iterations=100000):
     neighbourhood = []
     neighbourhood.append(element)
     for _ in range(max_iterations):
         previous_solution = neighbourhood[-1]
-        neighbour = get_random_solution(previous=previous_solution, data=data)
+        neighbour = get_random_solution(previous=previous_solution, n=n, data=data)
         neighbourhood.append(neighbour)
     return neighbour
@@ -75,6 +70,6 @@ def explore_neighbourhood(element, data, max_iterations=100000):
 def local_search(n, m, data):
     first_solution = get_first_random_solution(n, m, data)
     best_solution = explore_neighbourhood(
-        element=first_solution, data=data, max_iterations=100
+        element=first_solution, n=n, data=data, max_iterations=100
     )
     return best_solution
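The hunks above (presumably src/local_search.py, given the import in src/main.py below) rely on a solution DataFrame with a "point" column of integer indices drawn via randint(n) and a "distance" column, plus a compute_distance helper that does not appear in this diff. A minimal runnable sketch of that assumed representation follows; the toy distance table, the column layout, and compute_distance itself are illustrative assumptions rather than repository code:

# Sketch under assumptions: the "point"/"distance" columns and compute_distance
# are inferred from the diff above; none of this is repository code.
from numpy.random import randint, seed
from pandas import DataFrame

seed(0)
n = 5  # assumed number of candidate points

# Toy symmetric distance table in the source/destination/distance layout used
# by the queries elsewhere in this commit.
data = DataFrame(
    [
        {"source": s, "destination": d, "distance": float(abs(s - d))}
        for s in range(n)
        for d in range(s + 1, n)
    ]
)


def compute_distance(element, solution, data):
    # Assumed helper: total distance from `element` to the other selected points.
    accumulator = 0
    for other in solution["point"]:
        if other == element:
            continue
        row = data.query(
            "(source == @element and destination == @other)"
            " or (source == @other and destination == @element)"
        )
        accumulator += row["distance"].values[0]
    return accumulator


# A solution is assumed to be a DataFrame of selected point indices plus each
# point's contribution, which the new replace_worst_element updates in place.
solution = DataFrame({"point": [0, 2, 4], "distance": [0.0, 0.0, 0.0]})
solution["distance"] = [
    compute_distance(element=p, solution=solution, data=data)
    for p in solution["point"]
]
print(solution)
print(randint(n))  # how the adapted code draws a candidate replacement index

Storing per-point indices instead of whole source/destination rows is what lets element_in_dataframe collapse to a single point == {element} query.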
src/main.py (10 changed lines)
@@ -1,6 +1,6 @@
 from preprocessing import parse_file
 from greedy import greedy_algorithm
-from local_search import local_search
+from local_search import local_search, get_row_distance
 from sys import argv
 from time import time
 from itertools import combinations
@@ -16,14 +16,6 @@ def execute_algorithm(choice, n, m, data):
     exit(1)


-def get_row_distance(source, destination, data):
-    row = data.query(
-        """(source == @source and destination == @destination) or \
-        (source == @destination and destination == @source)"""
-    )
-    return row["distance"].values[0]
-
-
 def get_fitness(solutions, data):
     accumulator = 0
     comb = combinations(solutions.index, r=2)
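get_row_distance now lives in local_search and is imported back into src/main.py, where get_fitness pairs it with combinations over the solution index. A hedged sketch of that pairwise-summation pattern follows; pairwise_fitness, the toy data, and the "point" column are illustrative assumptions, and only the get_row_distance query is taken from the diff above:

# Illustrative sketch, not the repository's get_fitness: it shows how the pairs
# from combinations(solutions.index, r=2) could be folded into a total fitness
# with get_row_distance. pairwise_fitness, the toy data, and the "point" column
# are assumptions; only the get_row_distance query comes from the diff above.
from itertools import combinations

from pandas import DataFrame


def get_row_distance(source, destination, data):
    row = data.query(
        """(source == @source and destination == @destination) or \
        (source == @destination and destination == @source)"""
    )
    return row["distance"].values[0]


def pairwise_fitness(solutions, data):
    # Sum the distance of every unordered pair of selected points.
    accumulator = 0
    for index_a, index_b in combinations(solutions.index, r=2):
        accumulator += get_row_distance(
            source=solutions["point"].loc[index_a],
            destination=solutions["point"].loc[index_b],
            data=data,
        )
    return accumulator


data = DataFrame(
    {"source": [0, 0, 1], "destination": [1, 2, 2], "distance": [3.0, 4.0, 5.0]}
)
solutions = DataFrame({"point": [0, 1, 2]})
print(pairwise_fitness(solutions, data))  # 3.0 + 4.0 + 5.0 = 12.0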