Change fitness evaluation

parent c2cc3c716d
commit ab4748d28e
@@ -1,10 +1,11 @@
-from numpy import sum, append, intersect1d, array_equal
+from numpy import intersect1d, array_equal
 from numpy.random import randint, choice, shuffle
 from pandas import DataFrame
 from math import ceil
 from functools import partial
 from multiprocessing import Pool
 from copy import deepcopy
+from itertools import combinations


 def get_row_distance(source, destination, data):
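Note: the numpy helpers sum and append are dropped because fitness is no longer accumulated as an array; pair enumeration now comes from itertools.combinations instead. A minimal sketch of what combinations yields over an individual's row index (toy indices, for illustration only):

from itertools import combinations

# every unordered pair of row labels for an individual with rows 0..3
print(list(combinations([0, 1, 2, 3], r=2)))
# -> [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]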
@@ -35,22 +36,23 @@ def generate_individual(n, m, data):


 def evaluate_individual(individual, data):
-    fitness = []
-    genotype = individual.point.values
-    distances = data.query(f"source in @genotype and destination in @genotype")
-    for item in genotype[:-1]:
-        element_df = distances.query(f"source == {item} or destination == {item}")
-        max_distance = element_df["distance"].astype(float).max()
-        fitness = append(arr=fitness, values=max_distance)
-        distances = distances.query(f"source != {item} and destination != {item}")
-    individual["fitness"] = sum(fitness)
+    fitness = 0
+    comb = combinations(individual.index, r=2)
+    for index in list(comb):
+        elements = individual.loc[index, :]
+        fitness += get_row_distance(
+            source=elements["point"].head(n=1).values[0],
+            destination=elements["point"].tail(n=1).values[0],
+            data=data,
+        )
+    individual["fitness"] = fitness
     return individual


 def select_distinct_genes(matching_genes, parents, m):
     first_parent = parents[0].query("point not in @matching_genes")
     second_parent = parents[1].query("point not in @matching_genes")
-    cutoff = randint(m - len(matching_genes))
+    cutoff = randint(m - len(matching_genes) + 1)
     first_parent_genes = first_parent.point.values[cutoff:]
     second_parent_genes = second_parent.point.values[:cutoff]
     return first_parent_genes, second_parent_genes
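Note: evaluate_individual now scores an individual as the sum of pairwise distances between every two selected points, reusing get_row_distance, instead of the previous per-gene max-distance accumulation. A minimal standalone sketch of the same idea, assuming data has source/destination/distance columns and the individual has a point column as in the diff (toy values, for illustration only):

from itertools import combinations
from pandas import DataFrame

data = DataFrame({"source": [1, 1, 2], "destination": [2, 3, 3],
                  "distance": [4.0, 2.5, 3.0]})
individual = DataFrame({"point": [1, 2, 3]})

def get_row_distance(source, destination, data):
    # look the pair up in either direction
    row = data.query(
        "(source == @source and destination == @destination) or "
        "(source == @destination and destination == @source)"
    )
    return row["distance"].values[0]

fitness = sum(
    get_row_distance(individual.point[i], individual.point[j], data)
    for i, j in combinations(individual.index, r=2)
)
print(fitness)  # 4.0 + 2.5 + 3.0 = 9.5

The related cutoff change matters because numpy.random.randint(high) samples from [0, high): without the + 1 the cutoff could never take the largest valid value, and the call would raise a ValueError whenever the parents matched on all m genes.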
@@ -137,9 +139,8 @@ def group_parents(parents):
         first = parents[i]
         second = parents[i + 1]
         if array_equal(first.point.values, second.point.values):
-            tmp = second
-            second = parents[i - 2]
-            parents[i - 2] = tmp
+            random_index = randint(i + 1)
+            second, parents[random_index] = parents[random_index], second
         parent_pairs.append([first, second])
     return parent_pairs

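Note: when two consecutive parents are identical, the old code always swapped with the parent two positions back; the new code swaps second with a parent at a randomly chosen earlier index. A toy sketch of the mechanics (plain strings stand in for individuals; the real code compares point arrays with array_equal):

from numpy.random import randint

parents = ["A", "B", "C", "C"]          # hypothetical pool; positions 2 and 3 clash
i = 2
first, second = parents[i], parents[i + 1]
if first == second:
    random_index = randint(i + 1)       # uniform draw from 0..i (numpy excludes the upper bound)
    # exchange the clashing mate with the randomly chosen one
    second, parents[random_index] = parents[random_index], second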
src/main.py (55 changed lines)
@@ -1,9 +1,7 @@
 from preprocessing import parse_file
 from genetic_algorithm import genetic_algorithm
 from memetic_algorithm import memetic_algorithm
-from sys import argv
 from time import time
-from itertools import combinations
 from argparse import ArgumentParser


@@ -17,54 +15,24 @@ def execute_algorithm(args, n, m, data):
             crossover_mode=args.crossover,
             max_iterations=100,
         )
-    else:
-        return memetic_algorithm(
-            n,
-            m,
-            data,
-            hybridation=args.hybridation,
-            max_iterations=100,
-        )
-
-
-def get_row_distance(source, destination, data):
-    row = data.query(
-        """(source == @source and destination == @destination) or \
-        (source == @destination and destination == @source)"""
-    )
-    return row["distance"].values[0]
-
-
-def get_fitness(solutions, data):
-    counter = 0
-    comb = combinations(solutions.index, r=2)
-    for index in list(comb):
-        elements = solutions.loc[index, :]
-        counter += get_row_distance(
-            source=elements["point"].head(n=1).values[0],
-            destination=elements["point"].tail(n=1).values[0],
-            data=data,
-        )
-    return counter
-
-
-def show_results(solutions, fitness, time_delta):
-    duplicates = solutions.duplicated().any()
-    print(solutions)
-    print(f"Total distance: {fitness}")
+    return memetic_algorithm(
+        n,
+        m,
+        data,
+        hybridation=args.hybridation,
+        max_iterations=100,
+    )
+
+
+def show_results(solution, time_delta):
+    duplicates = solution.duplicated().any()
+    print(solution)
+    print(f"Total distance: {solution.fitness.values[0]}")
     if not duplicates:
         print("No duplicates found")
     print(f"Execution time: {time_delta}")
-
-
-def usage(argv):
-    print(f"Usage: python {argv[0]} <file> <algorithm choice> <")
-    print("algorithm choices:")
-    print("genetic: genetic algorithm")
-    print("memetic: memetic algorithm")
-    exit(1)


 def parse_arguments():
     parser = ArgumentParser()
     parser.add_argument("file", help="dataset of choice")
@@ -83,8 +51,7 @@ def main():
     start_time = time()
     solutions = execute_algorithm(args, n, m, data)
     end_time = time()
-    fitness = get_fitness(solutions, data)
-    show_results(solutions, fitness, time_delta=end_time - start_time)
+    show_results(solutions, time_delta=end_time - start_time)


 if __name__ == "__main__":