Compare commits

..

No commits in common. "4e640ffc2d183cfb3a5ccabc407d61b49c6bae27" and "c2cc3c716dd127a87f39422b579de1bad050a7d8" have entirely different histories.

3 changed files with 104 additions and 44 deletions

View File

@ -1,11 +1,10 @@
from numpy import intersect1d, array_equal from numpy import sum, append, intersect1d, array_equal
from numpy.random import randint, choice, shuffle from numpy.random import randint, choice, shuffle
from pandas import DataFrame from pandas import DataFrame
from math import ceil from math import ceil
from functools import partial from functools import partial
from multiprocessing import Pool from multiprocessing import Pool
from copy import deepcopy from copy import deepcopy
from itertools import combinations
def get_row_distance(source, destination, data): def get_row_distance(source, destination, data):
@ -36,23 +35,22 @@ def generate_individual(n, m, data):
def evaluate_individual(individual, data):
    """Compute and store the fitness of one individual (a DataFrame of points).

    Fitness is accumulated greedily: for each gene except the last, take the
    maximum distance among the remaining rows touching that gene, then drop
    every row involving it.  The last gene is skipped because no rows remain
    once all earlier genes have been removed.

    individual: DataFrame with a "point" column; a "fitness" column is added
        in place (same scalar on every row).
    data: DataFrame with "source", "destination" and "distance" columns.
    Returns the mutated *individual*.
    """
    fitness = []
    genotype = individual.point.values
    distances = data.query("source in @genotype and destination in @genotype")
    for item in genotype[:-1]:
        element_df = distances.query(f"source == {item} or destination == {item}")
        # Plain list append instead of numpy.append, which copies the whole
        # array on every iteration (quadratic) and shadows the builtin.
        fitness.append(element_df["distance"].astype(float).max())
        distances = distances.query(f"source != {item} and destination != {item}")
    individual["fitness"] = sum(fitness)
    return individual
def select_distinct_genes(matching_genes, parents, m):
    """Split the non-matching genes of two parents at a random cutoff.

    matching_genes: genes shared by both parents, excluded from the split.
    parents: sequence of two DataFrames, each with a "point" column.
    m: total number of genes per individual.
    Returns (tail of first parent's distinct genes, head of second parent's).
    """
    first_parent = parents[0].query("point not in @matching_genes")
    second_parent = parents[1].query("point not in @matching_genes")
    remaining = m - len(matching_genes)
    # numpy randint raises ValueError on an empty range; when every gene
    # matches there is nothing to split, so any cutoff (0) works.
    cutoff = randint(remaining) if remaining > 0 else 0
    first_parent_genes = first_parent.point.values[cutoff:]
    second_parent_genes = second_parent.point.values[:cutoff]
    return first_parent_genes, second_parent_genes
@ -139,8 +137,9 @@ def group_parents(parents):
first = parents[i] first = parents[i]
second = parents[i + 1] second = parents[i + 1]
if array_equal(first.point.values, second.point.values): if array_equal(first.point.values, second.point.values):
random_index = randint(i + 1) tmp = second
second, parents[random_index] = parents[random_index], second second = parents[i - 2]
parents[i - 2] = tmp
parent_pairs.append([first, second]) parent_pairs.append([first, second])
return parent_pairs return parent_pairs

View File

@ -1,7 +1,9 @@
from preprocessing import parse_file from preprocessing import parse_file
from genetic_algorithm import genetic_algorithm from genetic_algorithm import genetic_algorithm
from memetic_algorithm import memetic_algorithm from memetic_algorithm import memetic_algorithm
from sys import argv
from time import time from time import time
from itertools import combinations
from argparse import ArgumentParser from argparse import ArgumentParser
@ -15,24 +17,54 @@ def execute_algorithm(args, n, m, data):
crossover_mode=args.crossover, crossover_mode=args.crossover,
max_iterations=100, max_iterations=100,
) )
return memetic_algorithm( else:
n, return memetic_algorithm(
m, n,
data, m,
hybridation=args.hybridation, data,
max_iterations=100, hybridation=args.hybridation,
max_iterations=100,
)
def get_row_distance(source, destination, data):
    """Look up the distance between two points, in either row orientation.

    Raises IndexError if the pair is absent from *data*.
    """
    match = data.query(
        """(source == @source and destination == @destination) or \
(source == @destination and destination == @source)"""
    )
    return match["distance"].values[0]
def get_fitness(solutions, data):
    """Sum the pairwise distance over every pair of points in *solutions*.

    solutions: DataFrame with a "point" column.
    data: distance table passed through to get_row_distance.
    Returns the total as a number.
    """
    total = 0
    # combinations is already iterable; materializing it with list() only
    # wastes memory.
    for index in combinations(solutions.index, r=2):
        elements = solutions.loc[index, :]
        total += get_row_distance(
            source=elements["point"].head(n=1).values[0],
            destination=elements["point"].tail(n=1).values[0],
            data=data,
        )
    return total
def show_results(solutions, fitness, time_delta):
    """Print the solution table, its total distance, and the elapsed time."""
    has_duplicates = solutions.duplicated().any()
    print(solutions)
    print(f"Total distance: {fitness}")
    if not has_duplicates:
        print("No duplicates found")
    print(f"Execution time: {time_delta}")
def usage(argv):
    """Print command-line usage and terminate with exit status 1.

    argv: the process argument vector; argv[0] is the program name.
    """
    # Original string ended with a stray truncated "<"; dropped it.
    print(f"Usage: python {argv[0]} <file> <algorithm choice>")
    print("algorithm choices:")
    print("genetic: genetic algorithm")
    print("memetic: memetic algorithm")
    # raise SystemExit instead of exit(): the exit() builtin is injected by
    # the site module and is meant for interactive sessions.
    raise SystemExit(1)
def parse_arguments(): def parse_arguments():
parser = ArgumentParser() parser = ArgumentParser()
parser.add_argument("file", help="dataset of choice") parser.add_argument("file", help="dataset of choice")
@ -51,7 +83,8 @@ def main():
start_time = time() start_time = time()
solutions = execute_algorithm(args, n, m, data) solutions = execute_algorithm(args, n, m, data)
end_time = time() end_time = time()
show_results(solutions, time_delta=end_time - start_time) fitness = get_fitness(solutions, data)
show_results(solutions, fitness, time_delta=end_time - start_time)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -1,22 +1,50 @@
from genetic_algorithm import * from numpy.random import choice, seed
from local_search import local_search
def get_first_random_solution(m, data):
    """Draw an initial solution of *m* distinct rows sampled from *data*."""
    # NOTE(review): reseeding the global RNG on every call makes this function
    # return the same "random" rows each time and clobbers global RNG state —
    # confirm this determinism is intended.
    seed(42)
    random_indexes = choice(len(data.index), size=m, replace=False)
    # NOTE(review): .loc with positional draws assumes a default RangeIndex;
    # .iloc may be what is actually meant — verify against callers.
    return data.loc[random_indexes]
def element_in_dataframe(solution, element):
    """Return True when *element*'s (source, destination) pair — in either
    orientation — already appears in *solution*."""
    matches = solution.query(
        f"(source == {element.source} and destination == {element.destination}) or (source == {element.destination} and destination == {element.source})"
    )
    return not matches.empty
best_index, _ = get_best_elements(population)
run_local_search(n, m, data, individual=population[best_index])
def replace_worst_element(previous, data):
    """Swap the lowest-distance row of *previous* for a random row of *data*.

    Returns (new solution copy, index of the replaced row).
    """
    candidate = previous.copy()
    worst_index = candidate["distance"].astype(float).idxmin()
    replacement = data.sample().squeeze()
    # Re-draw until the sampled row is not already part of the solution.
    # NOTE(review): loops forever if every row of data is already present —
    # confirm data is always strictly larger than the solution.
    while element_in_dataframe(solution=candidate, element=replacement):
        replacement = data.sample().squeeze()
    candidate.loc[worst_index] = replacement
    return candidate, worst_index
def get_random_solution(previous, data):
    """Replace the worst element until the distance at that slot improves
    strictly over the previous solution's worst distance."""
    solution, worst_index = replace_worst_element(previous, data)
    threshold = previous["distance"].loc[worst_index]
    # NOTE(review): later iterations still compare against the FIRST
    # worst_index even though replace_worst_element may target a different
    # row — confirm this is intended.
    while solution.distance.loc[worst_index] <= threshold:
        solution, _ = replace_worst_element(previous=solution, data=data)
    return solution
def explore_neighbourhood(element, data, max_iterations=100000):
    """Random-walk local search starting from *element*.

    Repeatedly replaces the current solution with a random improving
    neighbour and returns the last one generated.  When max_iterations is 0
    the starting *element* is returned (the previous implementation raised
    NameError on an undefined local in that case, and also kept every
    intermediate solution alive in a list although only the last was used).
    """
    current = element
    for _ in range(max_iterations):
        current = get_random_solution(previous=current, data=data)
    return current
def memetic_algorithm(m, data):
    """Local-search heuristic: seed with a random solution of size *m*, then
    walk its neighbourhood for a fixed number of iterations."""
    initial = get_first_random_solution(m=m, data=data)
    return explore_neighbourhood(element=initial, data=data, max_iterations=100)