Compare commits
2 Commits
c2cc3c716d
...
4e640ffc2d
Author | SHA1 | Date |
---|---|---|
coolneng | 4e640ffc2d | |
coolneng | ab4748d28e |
|
@ -1,10 +1,11 @@
|
|||
from numpy import sum, append, intersect1d, array_equal
|
||||
from numpy import intersect1d, array_equal
|
||||
from numpy.random import randint, choice, shuffle
|
||||
from pandas import DataFrame
|
||||
from math import ceil
|
||||
from functools import partial
|
||||
from multiprocessing import Pool
|
||||
from copy import deepcopy
|
||||
from itertools import combinations
|
||||
|
||||
|
||||
def get_row_distance(source, destination, data):
|
||||
|
@ -35,22 +36,23 @@ def generate_individual(n, m, data):
|
|||
|
||||
|
||||
def evaluate_individual(individual, data):
    """Compute an individual's fitness as the sum of pairwise distances
    between every pair of its selected points.

    :param individual: DataFrame with a "point" column holding the genotype
    :param data: DataFrame of pairwise distances with columns
        source/destination/distance (schema inferred from get_row_distance
        usage — confirm against the data loader)
    :return: the same individual, with its "fitness" column populated
    """
    fitness = 0
    # combinations() is lazy; iterating it directly avoids building a
    # throwaway list of every index pair
    for index in combinations(individual.index, r=2):
        elements = individual.loc[index, :]
        fitness += get_row_distance(
            source=elements["point"].head(n=1).values[0],
            destination=elements["point"].tail(n=1).values[0],
            data=data,
        )
    individual["fitness"] = fitness
    return individual
||||
|
||||
|
||||
def select_distinct_genes(matching_genes, parents, m):
    """Split the non-shared genes of two parents at a random cut point.

    Genes present in both parents (matching_genes) are excluded; the
    remainder is taken from the tail of the first parent and the head of
    the second parent, split at a uniformly random cutoff.

    :param matching_genes: genes shared by both parents
    :param parents: sequence of two parent DataFrames with a "point" column
    :param m: total number of genes per individual
    :return: tuple (genes from first parent, genes from second parent)
    """
    non_shared_first = parents[0].query("point not in @matching_genes")
    non_shared_second = parents[1].query("point not in @matching_genes")
    # +1 so the cutoff can land on every position, including both ends
    cutoff = randint(m - len(matching_genes) + 1)
    genes_from_first = non_shared_first.point.values[cutoff:]
    genes_from_second = non_shared_second.point.values[:cutoff]
    return genes_from_first, genes_from_second
|
||||
|
@ -137,9 +139,8 @@ def group_parents(parents):
|
|||
first = parents[i]
|
||||
second = parents[i + 1]
|
||||
if array_equal(first.point.values, second.point.values):
|
||||
tmp = second
|
||||
second = parents[i - 2]
|
||||
parents[i - 2] = tmp
|
||||
random_index = randint(i + 1)
|
||||
second, parents[random_index] = parents[random_index], second
|
||||
parent_pairs.append([first, second])
|
||||
return parent_pairs
|
||||
|
||||
|
|
55
src/main.py
55
src/main.py
|
@ -1,9 +1,7 @@
|
|||
from preprocessing import parse_file
|
||||
from genetic_algorithm import genetic_algorithm
|
||||
from memetic_algorithm import memetic_algorithm
|
||||
from sys import argv
|
||||
from time import time
|
||||
from itertools import combinations
|
||||
from argparse import ArgumentParser
|
||||
|
||||
|
||||
|
@ -17,54 +15,24 @@ def execute_algorithm(args, n, m, data):
|
|||
crossover_mode=args.crossover,
|
||||
max_iterations=100,
|
||||
)
|
||||
else:
|
||||
return memetic_algorithm(
|
||||
n,
|
||||
m,
|
||||
data,
|
||||
hybridation=args.hybridation,
|
||||
max_iterations=100,
|
||||
)
|
||||
|
||||
|
||||
def get_row_distance(source, destination, data):
    """Return the stored distance between two points.

    The pair may be recorded in either orientation (source/destination
    swapped), so both orderings are matched.

    :param source: one endpoint of the pair
    :param destination: the other endpoint
    :param data: DataFrame with source/destination/distance columns
    :return: the distance value of the matching row
    """
    rows = data.query(
        """(source == @source and destination == @destination) or \
        (source == @destination and destination == @source)"""
    )
    return rows["distance"].values[0]
|
||||
|
||||
|
||||
def get_fitness(solutions, data):
    """Sum the pairwise distances between every pair of selected points.

    :param solutions: DataFrame with a "point" column
    :param data: DataFrame of pairwise distances
    :return: total distance accumulated over all point pairs
    """
    counter = 0
    # iterate the combinations lazily instead of materializing a list
    for index in combinations(solutions.index, r=2):
        elements = solutions.loc[index, :]
        counter += get_row_distance(
            source=elements["point"].head(n=1).values[0],
            destination=elements["point"].tail(n=1).values[0],
            data=data,
        )
    return counter
|
||||
|
||||
|
||||
def show_results(solutions, fitness, time_delta):
|
||||
duplicates = solutions.duplicated().any()
|
||||
print(solutions)
|
||||
print(f"Total distance: {fitness}")
|
||||
def show_results(solution, time_delta):
|
||||
duplicates = solution.duplicated().any()
|
||||
print(solution)
|
||||
print(f"Total distance: {solution.fitness.values[0]}")
|
||||
if not duplicates:
|
||||
print("No duplicates found")
|
||||
print(f"Execution time: {time_delta}")
|
||||
|
||||
|
||||
def usage(argv):
    """Print command-line usage information and exit with status 1.

    :param argv: sys.argv-style list; argv[0] is the program name
    """
    # the original message was visibly truncated ("... <"); completed here
    print(f"Usage: python {argv[0]} <file> <algorithm choice>")
    print("algorithm choices:")
    print("genetic: genetic algorithm")
    print("memetic: memetic algorithm")
    exit(1)
|
||||
|
||||
|
||||
def parse_arguments():
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument("file", help="dataset of choice")
|
||||
|
@ -83,8 +51,7 @@ def main():
|
|||
start_time = time()
|
||||
solutions = execute_algorithm(args, n, m, data)
|
||||
end_time = time()
|
||||
fitness = get_fitness(solutions, data)
|
||||
show_results(solutions, fitness, time_delta=end_time - start_time)
|
||||
show_results(solutions, time_delta=end_time - start_time)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,50 +1,22 @@
|
|||
from numpy.random import choice, seed
|
||||
from genetic_algorithm import *
|
||||
from local_search import local_search
|
||||
|
||||
|
||||
def get_first_random_solution(m, data):
    """Draw an initial solution of *m* distinct rows from *data*.

    NOTE(review): the RNG is reseeded with a constant on every call, so
    repeated calls always select the same rows — presumably intentional
    for reproducibility; confirm before changing.
    """
    seed(42)
    indexes = choice(len(data.index), size=m, replace=False)
    return data.loc[indexes]
|
||||
def run_local_search(n, m, data, individual):
    """Refine *individual* via local search (stub — not yet implemented).

    NOTE(review): placeholder introduced alongside the memetic-algorithm
    driver; presumably filled in by a later commit — confirm before
    relying on any hybridation behaviour.
    """
    pass
|
||||
|
||||
|
||||
def element_in_dataframe(solution, element):
    """Return True when *element*'s (source, destination) pair already
    appears in *solution*, in either orientation.

    :param solution: DataFrame with source/destination columns
    :param element: row-like object exposing .source and .destination
    :return: True if the pair is present, False otherwise
    """
    src = element.source
    dst = element.destination
    # use @-references instead of f-string interpolation: safer, and
    # avoids repr issues with non-integer values
    duplicates = solution.query(
        "(source == @src and destination == @dst) or "
        "(source == @dst and destination == @src)"
    )
    return not duplicates.empty
|
||||
|
||||
|
||||
def replace_worst_element(previous, data):
    """Swap the solution's lowest-distance element for a random element
    from *data* that is not already present.

    :param previous: current solution DataFrame (not mutated)
    :param data: pool of candidate rows to sample from
    :return: tuple (new solution, index of the replaced row)
    """
    candidate = previous.copy()
    worst_index = candidate["distance"].astype(float).idxmin()
    # keep drawing random rows until one not already in the solution comes up
    replacement = data.sample().squeeze()
    while element_in_dataframe(solution=candidate, element=replacement):
        replacement = data.sample().squeeze()
    candidate.loc[worst_index] = replacement
    return candidate, worst_index
|
||||
|
||||
|
||||
def get_random_solution(previous, data):
    """Generate a neighbouring solution whose replaced element strictly
    improves on the distance it had in *previous*.

    :param previous: current solution DataFrame
    :param data: pool of candidate rows
    :return: a neighbouring solution with a better element at the swapped index
    """
    candidate, worst_index = replace_worst_element(previous, data)
    threshold = previous["distance"].loc[worst_index]
    # resample until the swapped-in element beats the old distance
    while candidate.distance.loc[worst_index] <= threshold:
        candidate, _ = replace_worst_element(previous=candidate, data=data)
    return candidate
|
||||
|
||||
|
||||
def explore_neighbourhood(element, data, max_iterations=100000):
    """Random-walk the neighbourhood for *max_iterations* steps and
    return the last solution visited.

    :param element: starting solution
    :param data: pool of candidate rows used to build neighbours
    :param max_iterations: number of neighbourhood steps to take
    :return: the solution reached after the final step (the starting
        element when max_iterations <= 0)

    Fixes: only the last neighbour was ever used, so the list that
    accumulated every visited solution was wasted memory; and with
    max_iterations <= 0 the returned name was unbound (NameError).
    """
    current = element
    for _ in range(max_iterations):
        current = get_random_solution(previous=current, data=data)
    return current
|
||||
|
||||
|
||||
def memetic_algorithm(m, data):
    """Run a simple local-search optimisation: start from a random
    solution and walk its neighbourhood for a fixed number of steps.

    :param m: number of elements per solution
    :param data: pool of candidate rows
    :return: the solution reached after the walk
    """
    initial = get_first_random_solution(m=m, data=data)
    return explore_neighbourhood(element=initial, data=data, max_iterations=100)
|
||||
def memetic_algorithm(n, m, data, hybridation, max_iterations=100000):
    """Genetic algorithm hybridised with periodic local search.

    Every 10 generations the best individual is refined with local
    search; each generation otherwise applies a stationary genetic step
    (selection, uniform crossover, mutation, replacement).

    :param n: population size
    :param m: genes per individual
    :param data: pairwise-distance DataFrame
    :param hybridation: hybridation mode — NOTE(review): currently
        unused by this body; confirm intended wiring to run_local_search
    :param max_iterations: number of generations to run
    :return: the best individual of the final population
    """
    population = evaluate_population(
        [generate_individual(n, m, data) for _ in range(n)], data
    )
    for generation in range(max_iterations):
        if generation % 10 == 0:
            best_index, _ = get_best_elements(population)
            run_local_search(n, m, data, individual=population[best_index])
        parents = select_parents(population, n, mode="stationary")
        offspring = crossover(mode="uniform", parents=parents, m=m)
        offspring = mutate(offspring, n, data)
        population = replace_population(population, offspring, mode="stationary")
        population = evaluate_population(population, data)
    best_index, _ = get_best_elements(population)
    return population[best_index]
|
||||
|
|
Loading…
Reference in New Issue