Compare commits

...

2 Commits

Author SHA1 Message Date
coolneng 4e640ffc2d
Add memetic algorithm prototype 2021-06-21 07:39:51 +02:00
coolneng ab4748d28e
Change fitness evaluation 2021-06-21 07:39:39 +02:00
3 changed files with 44 additions and 104 deletions

View File

@@ -1,10 +1,11 @@
from numpy import sum, append, intersect1d, array_equal from numpy import intersect1d, array_equal
from numpy.random import randint, choice, shuffle from numpy.random import randint, choice, shuffle
from pandas import DataFrame from pandas import DataFrame
from math import ceil from math import ceil
from functools import partial from functools import partial
from multiprocessing import Pool from multiprocessing import Pool
from copy import deepcopy from copy import deepcopy
from itertools import combinations
def get_row_distance(source, destination, data): def get_row_distance(source, destination, data):
@ -35,22 +36,23 @@ def generate_individual(n, m, data):
def evaluate_individual(individual, data):
    """Compute the fitness of an individual and store it on the individual.

    Fitness is the sum of pairwise distances between every pair of selected
    points, looked up through get_row_distance.

    :param individual: DataFrame with one row per selected point and a
        "point" column.
    :param data: DataFrame of pairwise distances (source, destination,
        distance) — assumed schema, confirm against preprocessing.
    :return: the same individual, with a "fitness" column assigned.
    """
    fitness = 0
    # Iterate the combinations lazily and unpack the two index labels
    # directly instead of materializing the list and slicing with
    # head(1)/tail(1) — same pairs, same order, less work.
    for first, second in combinations(individual.index, r=2):
        fitness += get_row_distance(
            source=individual.loc[first, "point"],
            destination=individual.loc[second, "point"],
            data=data,
        )
    individual["fitness"] = fitness
    return individual
def select_distinct_genes(matching_genes, parents, m):
    """Split the non-shared genes of two parents at a random cutoff.

    Genes already present in matching_genes are filtered out of both
    parents; the remainder is split so the two returned arrays together
    hold exactly m - len(matching_genes) points.

    :param matching_genes: genes present in both parents.
    :param parents: pair of parent DataFrames with a "point" column.
    :param m: total number of genes per individual.
    :return: tuple (genes taken from the first parent, genes taken from
        the second parent).
    """
    remainder = m - len(matching_genes)
    # Drop the shared genes before splitting.
    first_pool = parents[0].query("point not in @matching_genes")
    second_pool = parents[1].query("point not in @matching_genes")
    # Inclusive upper bound: the cutoff may take any value in [0, remainder].
    cutoff = randint(remainder + 1)
    from_first = first_pool.point.values[cutoff:]
    from_second = second_pool.point.values[:cutoff]
    return from_first, from_second
@ -137,9 +139,8 @@ def group_parents(parents):
first = parents[i] first = parents[i]
second = parents[i + 1] second = parents[i + 1]
if array_equal(first.point.values, second.point.values): if array_equal(first.point.values, second.point.values):
tmp = second random_index = randint(i + 1)
second = parents[i - 2] second, parents[random_index] = parents[random_index], second
parents[i - 2] = tmp
parent_pairs.append([first, second]) parent_pairs.append([first, second])
return parent_pairs return parent_pairs

View File

@ -1,9 +1,7 @@
from preprocessing import parse_file from preprocessing import parse_file
from genetic_algorithm import genetic_algorithm from genetic_algorithm import genetic_algorithm
from memetic_algorithm import memetic_algorithm from memetic_algorithm import memetic_algorithm
from sys import argv
from time import time from time import time
from itertools import combinations
from argparse import ArgumentParser from argparse import ArgumentParser
@ -17,54 +15,24 @@ def execute_algorithm(args, n, m, data):
crossover_mode=args.crossover, crossover_mode=args.crossover,
max_iterations=100, max_iterations=100,
) )
else: return memetic_algorithm(
return memetic_algorithm( n,
n, m,
m, data,
data, hybridation=args.hybridation,
hybridation=args.hybridation, max_iterations=100,
max_iterations=100,
)
def get_row_distance(source, destination, data):
row = data.query(
"""(source == @source and destination == @destination) or \
(source == @destination and destination == @source)"""
) )
return row["distance"].values[0]
def show_results(solution, time_delta):
    """Print the solution, its total distance and the elapsed time.

    Also reports explicitly when the solution contains no duplicated rows.

    :param solution: solution DataFrame with a "fitness" column.
    :param time_delta: elapsed execution time to display.
    """
    print(solution)
    print(f"Total distance: {solution.fitness.values[0]}")
    # Flag duplicate-free solutions.
    if not solution.duplicated().any():
        print("No duplicates found")
    print(f"Execution time: {time_delta}")
def usage(argv):
    """Print command-line usage help and exit with status 1.

    :param argv: sys.argv-style list; argv[0] is the program name.
    :raises SystemExit: always, with exit code 1.
    """
    # Fixed: the usage string previously ended with a dangling "<"
    # (truncated placeholder).
    print(f"Usage: python {argv[0]} <file> <algorithm choice>")
    print("algorithm choices:")
    print("genetic: genetic algorithm")
    print("memetic: memetic algorithm")
    # raise SystemExit instead of the site-injected exit() builtin,
    # which is not guaranteed to exist in every interpreter mode.
    raise SystemExit(1)
def parse_arguments(): def parse_arguments():
parser = ArgumentParser() parser = ArgumentParser()
parser.add_argument("file", help="dataset of choice") parser.add_argument("file", help="dataset of choice")
@ -83,8 +51,7 @@ def main():
start_time = time() start_time = time()
solutions = execute_algorithm(args, n, m, data) solutions = execute_algorithm(args, n, m, data)
end_time = time() end_time = time()
fitness = get_fitness(solutions, data) show_results(solutions, time_delta=end_time - start_time)
show_results(solutions, fitness, time_delta=end_time - start_time)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -1,50 +1,22 @@
from numpy.random import choice, seed from genetic_algorithm import *
from local_search import local_search
def run_local_search(n, m, data, individual):
    """Refine *individual* with local search (prototype stub — not implemented).

    NOTE(review): the memetic loop calls this every 10 generations but no
    refinement happens yet, and the result is ignored by the caller.
    Presumably this will delegate to local_search.local_search (imported at
    the top of this module) — confirm the intended signature and whether it
    should mutate the individual in place or return a new one.
    """
    pass
random_indexes = choice(len(data.index), size=m, replace=False)
return data.loc[random_indexes]
def memetic_algorithm(n, m, data, hybridation, max_iterations=100000):
    """Genetic algorithm hybridized with periodic local search (prototype).

    Runs a stationary genetic loop (parent selection, uniform crossover,
    mutation, replacement, re-evaluation) and, every 10 generations,
    applies run_local_search to the current best individual.

    :param n: population size (also forwarded to the GA helpers).
    :param m: number of points per individual.
    :param data: distance DataFrame used for fitness evaluation.
    :param hybridation: NOTE(review): currently unused — presumably meant
        to select how/where local search is applied; confirm intent.
    :param max_iterations: number of generations to run.
    :return: the best individual of the final population.
    """
    population = [generate_individual(n, m, data) for _ in range(n)]
    population = evaluate_population(population, data)
    for i in range(max_iterations):
        # Hybridation step: refine the current best every 10 generations.
        # NOTE(review): run_local_search is a stub and its return value is
        # discarded, so this currently has no effect on the population.
        if i % 10 == 0:
            best_index, _ = get_best_elements(population)
            run_local_search(n, m, data, individual=population[best_index])
        parents = select_parents(population, n, mode="stationary")
        offspring = crossover(mode="uniform", parents=parents, m=m)
        offspring = mutate(offspring, n, data)
        population = replace_population(population, offspring, mode="stationary")
        population = evaluate_population(population, data)
    best_index, _ = get_best_elements(population)
    return population[best_index]
return solution, worst_index
def get_random_solution(previous, data):
    """Produce a neighbour of *previous* by replacing its worst element.

    Keeps re-replacing the current worst element until the row at the
    originally-replaced position has a distance strictly greater than the
    distance the old worst element had.

    NOTE(review): the loop condition always inspects the *first*
    worst_index, while later replace_worst_element calls target whichever
    row is currently worst — so later iterations may replace other rows
    without ever improving the checked one. Looks like a latent bug;
    confirm the intended termination condition.

    :param previous: current solution DataFrame with a "distance" column.
    :param data: pool of candidate rows to draw replacements from.
    :return: a new solution DataFrame.
    """
    solution, worst_index = replace_worst_element(previous, data)
    previous_worst_distance = previous["distance"].loc[worst_index]
    while solution.distance.loc[worst_index] <= previous_worst_distance:
        solution, _ = replace_worst_element(previous=solution, data=data)
    return solution
def explore_neighbourhood(element, data, max_iterations=100000):
    """Walk the neighbourhood of *element* by chained random moves.

    Each step feeds the latest solution to get_random_solution; only the
    current solution is kept.

    Fixes over the previous version: no longer accumulates every visited
    neighbour in a list (unbounded memory for large max_iterations), and
    returns the starting element instead of raising NameError when
    max_iterations == 0.

    :param element: starting solution (DataFrame).
    :param data: pool of candidate rows passed through to the moves.
    :param max_iterations: number of neighbourhood steps to take.
    :return: the last solution visited.
    """
    current = element
    for _ in range(max_iterations):
        current = get_random_solution(previous=current, data=data)
    return current
def memetic_algorithm(m, data):
    """Run a neighbourhood search from a random starting solution.

    Draws a random initial solution of m points and explores its
    neighbourhood for 100 iterations.

    :param m: number of points in the solution.
    :param data: pool of candidate rows.
    :return: the final solution found by the exploration.
    """
    initial = get_first_random_solution(m=m, data=data)
    return explore_neighbourhood(element=initial, data=data, max_iterations=100)