-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrandomized_optimization.py
More file actions
154 lines (135 loc) · 5.87 KB
/
randomized_optimization.py
File metadata and controls
154 lines (135 loc) · 5.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
import argparse
import matplotlib
import matplotlib.pyplot as plt
import mlrose_hiive as mlrose
from mlrose_hiive.algorithms.decay import GeomDecay, ArithDecay, ExpDecay
import numpy as np
COMPLEXITY = 40  # bit-string length used for every DiscreteOpt problem below
MAX_ATTEMPTS = 100  # consecutive non-improving attempts before a search gives up
# Global log of every fitness-function evaluation score made through the
# counting wrapper; reset (reassigned) before each algorithm run in
# fitnessVsIterations so the evaluations-vs-fitness curve is per-algorithm.
fitness_evals = []
def get_custom_fitness(fitness, record=None):
    """Wrap an mlrose fitness object so every evaluation is logged.

    Parameters
    ----------
    fitness : object with an ``evaluate(state)`` method (an mlrose fitness).
    record : list, optional
        Where to append each score. Defaults to the module-level
        ``fitness_evals`` list, preserving the original global-logging
        behavior.

    Returns
    -------
    callable
        ``counting_fitness(state)`` -> score, suitable for
        ``mlrose.CustomFitness``.
    """
    def counting_fitness(state):
        # Resolve the sink at call time so callers may reassign the
        # module-level fitness_evals between runs (fitnessVsIterations does).
        sink = fitness_evals if record is None else record
        score = fitness.evaluate(state)
        sink.append(score)
        return score
    return counting_fitness
def get_fitness_eval_curve(evals=None):
    """Return the running-best curve over fitness evaluations.

    A point ``(i, score)`` is emitted only when evaluation ``i`` produced a
    new best (strictly greater) score, so the curve is monotonically
    increasing. Scores <= 0 never appear — the running maximum starts at 0,
    matching the original behavior (these problems have non-negative
    fitness).

    Parameters
    ----------
    evals : sequence of numbers, optional
        Evaluation scores in order; defaults to the module-level
        ``fitness_evals`` log.

    Returns
    -------
    (list, list)
        x coordinates (evaluation indices) and y coordinates (new-best
        scores).
    """
    if evals is None:
        evals = fitness_evals
    xs = []
    ys = []
    best = 0
    for i, score in enumerate(evals):
        if score > best:
            best = score
            xs.append(i)
            ys.append(score)
    return xs, ys
class OptimizationProblem:
    """A bit-string DiscreteOpt problem solved by four randomized-search
    algorithms, each averaged over many random seeds."""

    def __init__(self, complexity, fitness_fn):
        # `complexity` is the bit-string length of the problem instance.
        self.problem = mlrose.DiscreteOpt(length = complexity, fitness_fn = fitness_fn)
        self.max_attempts = MAX_ATTEMPTS

    def solveAndAverage(self, alg, **kwargs):
        """Run ``alg(**kwargs, random_state=seed)`` across seeds and average.

        Returns ``(best_state, average_score, average_curve)`` where
        ``best_state`` is the state from the seed with the highest score,
        ``average_score`` is the mean score over all seeds, and
        ``average_curve`` is the elementwise mean of the per-seed fitness
        curves, truncated to the shortest curve (runs can stop at different
        iteration counts).
        """
        scores = []
        curves = []
        # BUG FIX: was initialized to 0, so if every seed scored <= 0 the
        # method returned best_state=None. -inf guarantees the first result
        # is always captured.
        max_score = float('-inf')
        best_state = None
        # THIS MAY TAKE A WHILE, CHANGE THIS RANGE TO range(1,5) FOR A FASTER BUT ROUGHER RUN
        for random_seed in range(1, 21):
            state, score, curve = alg(**kwargs, random_state = random_seed)
            scores.append(score)
            curves.append(curve)
            if score > max_score:
                max_score = score
                best_state = state
        average_score = np.mean(scores)
        # Average only over the prefix every run actually reached.
        shortest = min(len(c) for c in curves)
        average_curve = [np.mean([c[i] for c in curves]) for i in range(shortest)]
        return best_state, average_score, average_curve

    def solveWithHillClimbing(self):
        return self.solveAndAverage(alg = mlrose.random_hill_climb,
                                    problem = self.problem,
                                    max_attempts = self.max_attempts,
                                    restarts = 30,
                                    curve = True)

    def solveWithSimulatedAnnealing(self):
        return self.solveAndAverage(alg = mlrose.simulated_annealing,
                                    problem = self.problem,
                                    schedule = ExpDecay(),
                                    max_attempts = self.max_attempts,
                                    curve = True)

    def solveWithGeneticAlg(self):
        # NOTE(review): max_attempts is hard-coded to 10 here (not
        # self.max_attempts) — presumably to cap the cost of the expensive
        # population-based search; confirm this is intentional.
        return self.solveAndAverage(alg = mlrose.genetic_alg,
                                    pop_size = 160,
                                    mutation_prob = 0.4,
                                    problem = self.problem,
                                    max_attempts = 10,
                                    curve = True)

    def solveWithMimic(self):
        return self.solveAndAverage(alg = mlrose.mimic,
                                    pop_size = 160,
                                    keep_pct = 0.3,
                                    problem = self.problem,
                                    max_attempts = self.max_attempts,
                                    curve = True)
def fitnessVsIterations(fitness, problem_name):
    """Solve the problem with all four algorithms and show two plots.

    First plot: running-best fitness vs number of fitness-function
    evaluations (from the global ``fitness_evals`` log). Second plot: the
    per-iteration fitness curves returned by each algorithm. Prints each
    algorithm's best state and best fitness as it goes.

    Refactored from four copy-pasted run/print/plot stanzas into one
    data-driven loop; printed text, legend labels, and plot order are
    unchanged.
    """
    global fitness_evals
    OP = OptimizationProblem(complexity = COMPLEXITY, fitness_fn = fitness)
    # (legend label, label used in printed summary, solver method)
    runs = [
        ('Random Hill Climbing', 'RHC', OP.solveWithHillClimbing),
        ('Simulated Annealing', 'Simulated Annealing', OP.solveWithSimulatedAnnealing),
        ('Genetic Alg', 'Genetic Alg', OP.solveWithGeneticAlg),
        ('MIMIC', 'MIMIC', OP.solveWithMimic),
    ]
    legend = [label for label, _, _ in runs]
    curves = []
    for _, print_label, solver in runs:
        fitness_evals = []  # reset the global evaluation log for this run
        best_state, best_fitness, curve = solver()
        curves.append(curve)
        print( 'Best state found by %s: %s' % (print_label, best_state) )
        print( 'Best fitness found by %s: %s' % (print_label, best_fitness) )
        x, y = get_fitness_eval_curve()
        plt.plot(x, y)
    plt.legend(legend)
    plt.suptitle(f'{problem_name}: Fitness Vs Fitness Function Evaluations')
    plt.ylabel('Fitness Score')
    plt.xlabel('Evaluations')
    plt.show()
    for curve in curves:
        plt.plot(curve)
    plt.legend(legend)
    plt.suptitle(f'{problem_name}: Fitness Vs Iterations')
    plt.ylabel('Fitness Score')
    plt.xlabel('Iterations')
    plt.show()
if __name__ == '__main__':
    # CLI: select which optimization problem to run the experiment suite on.
    parser = argparse.ArgumentParser()
    parser.add_argument( '--problem', action = 'store', dest = 'problem', required = True )
    args = parser.parse_args()
    if args.problem == 'onemax':
        fitness = mlrose.OneMax()
        problem_name = 'Onemax'
    elif args.problem == 'four_peaks':
        fitness = mlrose.FourPeaks(t_pct = 0.15)
        problem_name = 'Four Peaks'
    elif args.problem == 'kcolor':
        # Small fixed 5-node graph for the max-k-color problem.
        edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
        fitness = mlrose.MaxKColor(edges)
        # BUG FIX: problem_name was never assigned in this branch, so
        # `--problem kcolor` crashed with NameError at the
        # fitnessVsIterations call below.
        problem_name = 'Max K-Color'
    elif args.problem == 'flipflop':
        fitness = mlrose.FlipFlop()
        problem_name = 'Flip Flop'
    else:
        raise RuntimeError("Invalid problem argument")
    # Wrap the fitness so every evaluation is logged for the
    # evaluations-vs-fitness plot.
    custom_fitness_function = get_custom_fitness(fitness)
    custom_fitness = mlrose.CustomFitness(custom_fitness_function)
    fitnessVsIterations(custom_fitness, problem_name)