A while ago I wrote about a genetic algorithm in this article, and this time I tried implementing an optimization algorithm built on a similar idea: particle swarm optimization.
Fish and birds often move in groups. These swarms keep moving in a coordinated, orderly way even when they spot predators or food. Particle swarm optimization borrows this group behavior for function optimization.
References:
- Introduction to Evolutionary Computation Algorithm
- Particle swarm optimization and nonlinear system

I haven't done anything complicated here; I implemented it in the simplest way I could.
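Concretely, each particle $i$ keeps a position $x_i$ and a velocity $v_i$ and repeats the standard PSO update below (this is exactly what `move()` in PSO.py implements): $p_i$ is particle $i$'s personal best position, $g$ the global best position, $w$ the inertia coefficient, $c_p, c_g$ the acceleration coefficients, and $r_1, r_2$ fresh uniform random numbers in $[0, 1]$.

```math
v_i \leftarrow w\,v_i + c_p r_1 (p_i - x_i) + c_g r_2 (g - x_i), \qquad x_i \leftarrow x_i + v_i
```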
functions.py
```python
import random

# Generate a random number between min and max
def randRange(a: float, b: float) -> float:
    return a + (b - a) * random.random()

# Generate n particles
def makeParticles(n: int, ranges: dict) -> list:
    ls = [{key: 0 for key in ranges.keys()} for _ in range(n)]
    for i in range(n):
        for key in ranges.keys():
            a, b = ranges[key]
            ls[i][key] = randRange(a, b)
    return ls

# Add the matching elements of two or more dicts
def dictAdd(*dic: dict) -> dict:
    ansDic = {}
    for key in dic[0].keys():
        ansDic[key] = 0
        for i in range(len(dic)):
            ansDic[key] += dic[i][key]
    return ansDic

# Subtract the matching elements of two dicts
def dictDiff(dic1: dict, dic2: dict) -> dict:
    ansDic = {}
    for key in dic1.keys():
        ansDic[key] = dic1[key] - dic2[key]
    return ansDic

# Multiply the matching elements of two dicts
def dictMul(dic1: dict, dic2: dict) -> dict:
    ansDic = {}
    for key in dic1.keys():
        ansDic[key] = dic1[key] * dic2[key]
    return ansDic

# Multiply each element of a dict by a scalar
def dictNumMul(dic: dict, n) -> dict:
    ansDic = {}
    for key in dic.keys():
        ansDic[key] = dic[key] * n
    return ansDic
```
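A quick, illustrative check of these helpers (the positions are random, so the values in the comments are only examples):

```python
import functions as func

# Three particles with coordinates x in [0, 1] and y in [-1, 1]
particles = func.makeParticles(3, {"x": [0, 1], "y": [-1, 1]})
print(particles)  # e.g. [{'x': 0.42, 'y': -0.13}, ...]

# Element-wise arithmetic on coordinate dicts
print(func.dictAdd({"x": 1, "y": 2}, {"x": 3, "y": 4}))  # {'x': 4, 'y': 6}
print(func.dictNumMul({"x": 1, "y": 2}, 0.5))            # {'x': 0.5, 'y': 1.0}
```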
PSO.py
```python
import functions as func
import numpy as np
import copy

"""
time_max   Maximum number of iterations
swarm_size Number of particles
inertia    Inertia coefficient
ap         Personal best acceleration coefficient
ag         Global best acceleration coefficient
"""
class PSO:
    def __init__(self, time_max=1000, swarm_size=100,
                 inertia=0.9, ap=0.8, ag=0.8):
        self.time_max = time_max
        self.swarm_size = swarm_size
        self.inertia = inertia
        self.ap = ap
        self.ag = ag
        self.pBest = [np.inf for i in range(swarm_size)]
        self.gBest = np.inf
        self.gPosi = {}

    """
    Pass the function you want to minimize as the f argument of optimize,
    and pass the para argument as a dict of the form
    {"argument name": [minimum value, maximum value]}.
    """
    # Receive the function to optimize and the ranges of its arguments
    def optimize(self, f, para):
        self.function = f
        self.particleInitialization(para)
        self.Update()
        for i in range(self.time_max):
            self.move()
            self.Update()
        return self.gPosi, self.gBest

    # Initialize the particles
    def particleInitialization(self, ls: dict):
        self.particle = func.makeParticles(self.swarm_size, ls)
        self.pPosi = copy.deepcopy(self.particle)
        self.velocity = [{key: 0 for key in ls.keys()} for i in range(self.swarm_size)]

    # Move each particle
    def move(self):
        for i in range(self.swarm_size):
            v0 = func.dictNumMul(self.velocity[i], self.inertia)
            v1 = func.dictNumMul(func.dictDiff(self.pPosi[i], self.particle[i]), self.ap * func.randRange(0, 1))
            v2 = func.dictNumMul(func.dictDiff(self.gPosi, self.particle[i]), self.ag * func.randRange(0, 1))
            v = func.dictAdd(v0, v1, v2)
            self.particle[i] = func.dictAdd(self.particle[i], v)
            self.velocity[i] = v

    # Update the personal and global bests
    def Update(self):
        for i in range(self.swarm_size):
            cost = self.function(**self.particle[i])
            if cost < self.pBest[i]:
                self.pBest[i] = cost
                self.pPosi[i] = copy.deepcopy(self.particle[i])
                if cost < self.gBest:
                    self.gBest = cost
                    self.gPosi = copy.deepcopy(self.particle[i])
```
main.py
```python
import PSO

# Define the function you want to minimize
def f(x, y):
    return x**4 + 4*x**3 - 8*(x+1)**2 - 1 + y**4 + x**3 - 5*(y - 2)**2 - 3*x

def main():
    pso = PSO.PSO(time_max=1000)
    # Generate the initial particles in the range -10000 < x < 10000, -10000 < y < 10000 and minimize
    a, b = pso.optimize(f, {"x": [-10000, 10000], "y": [-10000, 10000]})
    print("Coordinates", a, "minimum value", b)

if __name__ == "__main__":
    main()
```
Output:

```
Coordinates {'x': -4.412547925687121, 'y': -2.187602966185929} minimum value -196.17514912856427
```
I was able to minimize it.
[Results with Wolfram Alpha](https://www.wolframalpha.com/input/?i=minimize%5Bx%5E4%2B4x%5E3-8%28x%2B1%29%5E2-1+%2B+y%5E4+%2B+x%5E3+-+5*%28y+-+2%29%5E2+-+3*x%5D)
The post originally ended here, but that alone is a bit dull, so I would also like to try logistic regression using particle swarm optimization.
Here, the classification boundary is found by minimizing the cross-entropy error function with particle swarm optimization.
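For reference, the cross-entropy error being minimized is the following (this is exactly what `loss()` in logistic.py computes), where $\sigma$ is the sigmoid function, $(x_i, y_i)$ is the $i$-th data point, and $t_i \in \{0, 1\}$ is its label:

```math
E(w_1, w_2, b) = -\sum_i \left[\, t_i \log \sigma(w_1 x_i + w_2 y_i + b) + (1 - t_i) \log\bigl(1 - \sigma(w_1 x_i + w_2 y_i + b)\bigr) \,\right]
```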
logistic.py
```python
import functions as func
import math
import PSO
import numpy

# Boundary used to label the generated random points
def f(x, y):
    if x + 2 * y < 11:
        return 1
    else:
        return 0

# Sigmoid function (clipped to avoid overflow in math.exp)
def sigmoid(x):
    if 600 > x > -600:
        return 1 / (1 + math.exp(-x))
    elif x > 0:
        return 1
    else:
        return 0

# log that returns -infinity at zero instead of raising an error
def llog(x):
    if x == 0:
        return -numpy.inf
    return math.log(x)

# Generate the training data with random numbers
data = [[func.randRange(0, 10), func.randRange(0, 10)] for i in range(100)]
label = [f(x, y) for x, y in data]

# Cost function to minimize (cross-entropy error)
def loss(w1, w2, b):
    ent = 0
    for i in range(len(data)):
        x = sigmoid(data[i][0] * w1 + data[i][1] * w2 + b)
        y = label[i]
        # Branch on the label so a saturated prediction never produces 0 * (-inf) = nan
        if y == 1:
            ent -= llog(x)
        else:
            ent -= llog(1 - x)
    return ent

def main():
    pso = PSO.PSO()
    a, b = pso.optimize(loss, {"w1": [-100, 100], "w2": [-100, 100], "b": [-100, 100]})
    print(a, b)
    # Evaluate whether the model classifies the training data correctly
    c = 0
    for i in range(100):
        n = sigmoid(data[i][0] * a["w1"] + data[i][1] * a["w2"] + a["b"])
        ans = 1 if n > 0.5 else 0
        if ans == label[i]:
            c += 1
    print("Classification accuracy", c / 100)

if __name__ == "__main__":
    main()
```
Result:

```
{'w1': -5.345994566644921, 'w2': -10.19111400412491, 'b': 56.97834543330356} 2.744570556357321
Classification accuracy 1.0
```
It classified the training data perfectly.
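As a quick sanity check (my own addition, using the weights printed above), the learned boundary $w_1 x + w_2 y + b = 0$ can be rewritten in the same form as the true boundary $x + 2y = 11$ that labeled the data:

```python
# Weights copied from the result above
w1, w2, b = -5.345994566644921, -10.19111400412491, 56.97834543330356

# Divide w1*x + w2*y + b = 0 by w1 to get x + (w2/w1)*y = -b/w1
print("x + %.3f*y = %.3f" % (w2 / w1, -b / w1))  # prints: x + 1.906*y = 10.658
```

which is close to $x + 2y = 11$, so the model recovered the labeling boundary well.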