Add modules from other labs

coolneng 2021-06-22 03:35:59 +02:00
parent c7d38c4a7c
commit c62d1213b8
Signed by: coolneng
GPG Key ID: 9893DA236405AF57
3 changed files with 149 additions and 0 deletions

src/local_search.py Normal file (75 additions)

@@ -0,0 +1,75 @@
from numpy.random import choice, seed, randint
from pandas import DataFrame


def get_row_distance(source, destination, data):
    # The distances are stored as an edge list, so look the pair up in either direction
    row = data.query(
        """(source == @source and destination == @destination) or \
        (source == @destination and destination == @source)"""
    )
    return row["distance"].values[0]


def compute_distance(element, solution, data):
    # Sum of the distances from element to every other point in the solution
    accumulator = 0
    distinct_elements = solution.query(f"point != {element}")
    for _, item in distinct_elements.iterrows():
        accumulator += get_row_distance(
            source=element,
            destination=item.point,
            data=data,
        )
    return accumulator


def get_first_random_solution(n, m, data):
    # Pick m distinct points out of n at random (fixed seed for reproducibility)
    solution = DataFrame(columns=["point", "distance"])
    seed(42)
    solution["point"] = choice(n, size=m, replace=False)
    solution["distance"] = solution["point"].apply(
        func=compute_distance, solution=solution, data=data
    )
    return solution


def element_in_dataframe(solution, element):
    duplicates = solution.query(f"point == {element}")
    return not duplicates.empty


def replace_worst_element(previous, n, data):
    # Swap the point with the lowest distance contribution for a random point
    # that is not already part of the solution
    solution = previous.copy()
    worst_index = solution["distance"].astype(float).idxmin()
    random_element = randint(n)
    while element_in_dataframe(solution=solution, element=random_element):
        random_element = randint(n)
    solution.loc[worst_index, "point"] = random_element
    solution.loc[worst_index, "distance"] = compute_distance(
        element=random_element, solution=solution, data=data
    )
    return solution


def get_random_solution(previous, n, data):
    # Keep swapping the worst element until the total distance improves
    solution = replace_worst_element(previous, n, data)
    while solution["distance"].sum() <= previous["distance"].sum():
        solution = replace_worst_element(previous=solution, n=n, data=data)
    return solution


def explore_neighbourhood(element, n, data, max_iterations=100000):
    # Generate a chain of improving neighbours and return the last (best) one
    neighbourhood = []
    neighbourhood.append(element)
    for _ in range(max_iterations):
        previous_solution = neighbourhood[-1]
        neighbour = get_random_solution(previous=previous_solution, n=n, data=data)
        neighbourhood.append(neighbour)
    return neighbour


def local_search(n, m, data):
    first_solution = get_first_random_solution(n, m, data)
    best_solution = explore_neighbourhood(
        element=first_solution, n=n, data=data, max_iterations=100
    )
    return best_solution

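A minimal usage sketch for the module above (not part of this commit), assuming it is run from the src/ directory on a dataset laid out as src/preprocessing.py expects; the file path is hypothetical:

from preprocessing import parse_file
from local_search import local_search

n, m, data = parse_file("data/example.txt")  # hypothetical dataset path
best = local_search(n, m, data)
print(best)                     # the m selected points and their distance contributions
print(best["distance"].sum())   # total dispersion of the final solution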
58
src/main.py Executable file

@@ -0,0 +1,58 @@
from preprocessing import parse_file
from genetic_algorithm import genetic_algorithm
from memetic_algorithm import memetic_algorithm
from time import time
from argparse import ArgumentParser


def execute_algorithm(args, n, m, data):
    # Dispatch to the requested metaheuristic based on the chosen subcommand
    if args.algorithm == "genetic":
        return genetic_algorithm(
            n,
            m,
            data,
            select_mode=args.selection,
            crossover_mode=args.crossover,
            max_iterations=100,
        )
    return memetic_algorithm(
        n,
        m,
        data,
        hybridation=args.hybridation,
        max_iterations=100,
    )


def show_results(solution, time_delta):
    duplicates = solution.duplicated().any()
    print(solution)
    print(f"Total distance: {solution.fitness.values[0]}")
    if not duplicates:
        print("No duplicates found")
    print(f"Execution time: {time_delta}")  # seconds


def parse_arguments():
    parser = ArgumentParser()
    parser.add_argument("file", help="dataset of choice")
    subparsers = parser.add_subparsers(dest="algorithm")
    parser_genetic = subparsers.add_parser("genetic")
    parser_memetic = subparsers.add_parser("memetic")
    parser_genetic.add_argument("crossover", choices=["uniform", "position"])
    parser_genetic.add_argument("selection", choices=["generational", "stationary"])
    parser_memetic.add_argument("hybridation", choices=["all", "random", "best"])
    return parser.parse_args()


def main():
    args = parse_arguments()
    n, m, data = parse_file(args.file)
    start_time = time()
    solutions = execute_algorithm(args, n, m, data)
    end_time = time()
    show_results(solutions, time_delta=end_time - start_time)


if __name__ == "__main__":
    main()

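A usage sketch (assumption, not part of this commit) showing how a command line maps onto the subparsers above, by faking sys.argv from Python; it assumes the interpreter runs from the src/ directory and the dataset path is hypothetical:

import sys

from main import main

# equivalent to: python src/main.py data/example.txt genetic uniform generational
sys.argv = ["main.py", "data/example.txt", "genetic", "uniform", "generational"]
main()  # parses the dataset, runs the genetic algorithm and prints the results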
src/preprocessing.py Normal file (16 additions)

@@ -0,0 +1,16 @@
from pandas import read_table


def read_header(filename):
    with open(filename, "r") as f:
        header = f.readline().split()
    return int(header[0])


def parse_file(filename):
    n = read_header(filename)
    m = 50
    df = read_table(
        filename, names=["source", "destination", "distance"], sep=" ", skiprows=[0]
    )
    return n, m, df
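A short sketch (not part of this commit) of what parse_file returns, assuming a whitespace-separated dataset whose first header field is the number of points and whose remaining rows are source, destination and distance triples; the path is hypothetical:

from preprocessing import parse_file

n, m, data = parse_file("data/example.txt")  # hypothetical path
print(n)            # number of points, read from the first header field
print(m)            # m is hard-coded to 50 in parse_file
print(data.head())  # DataFrame with columns source, destination, distance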