Python Optimization Algorithms 08: Slime Mould Algorithm (SMA)


Reference links:
    http://www.alimirjalili.com/SMA.html
    https://tinyurl.com/Slime-mould-algorithm


This optimization algorithm was proposed in 2020, so it is still fairly new; both links above provide source code.

In the spirit of "code that works is good code", I ported and combined the original code, and after testing it runs fine.

Straight to the code.


Slime Mould Algorithm (SMA)

This first part defines the general functionality shared by optimization algorithms.

    from copy import deepcopy

    from numpy import abs, arctanh, array, ceil, clip, log10, logical_and, ones, tanh, where, zeros
    from numpy.random import choice, uniform


    class Root:
        """ This is the root class of all algorithms """
        ID_MIN_PROB = 0     # min problem
        ID_MAX_PROB = -1    # max problem
        ID_POS = 0          # position
        ID_FIT = 1          # fitness
        EPSILON = 10E-10

        def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, verbose=True):
            """
            Parameters
            ----------
            obj_func : function
            lb : list
            ub : list
            problem_size : int, optional
            verbose : bool, optional
            """
            self.obj_func = obj_func
            if (lb is None) or (ub is None):
                if problem_size is None:
                    print("Problem size must be an int number")
                    exit(0)
                elif problem_size <= 0:
                    print("Problem size must > 0")
                    exit(0)
                else:
                    self.problem_size = int(ceil(problem_size))
                    self.lb = -1 * ones(problem_size)
                    self.ub = 1 * ones(problem_size)
            else:
                if isinstance(lb, list) and isinstance(ub, list) and not (problem_size is None):
                    if (len(lb) == len(ub)) and (problem_size > 0):
                        if len(lb) == 1:
                            self.problem_size = problem_size
                            self.lb = lb[0] * ones(problem_size)
                            self.ub = ub[0] * ones(problem_size)
                        else:
                            self.problem_size = len(lb)
                            self.lb = array(lb)
                            self.ub = array(ub)
                    else:
                        print("Lower bound and Upper bound need to be same length. Problem size must > 0")
                        exit(0)
                else:
                    print("Lower bound and Upper bound need to be a list. Problem size is an int number")
                    exit(0)
            self.verbose = verbose
            self.epoch, self.pop_size = None, None
            self.solution, self.loss_train = None, []

        def create_solution(self, minmax=0):
            """ Return a solution with 2 elements: the position and its fitness

            Parameters
            ----------
            minmax
                0 - minimum problem, else - maximum problem
            """
            position = uniform(self.lb, self.ub)
            fitness = self.get_fitness_position(position=position, minmax=minmax)
            return [position, fitness]

        def get_fitness_position(self, position=None, minmax=0):
            """ Assumes the objective function always returns the original value
            :param position: 1-D numpy array
            :param minmax: 0 - min problem, 1 - max problem
            """
            return self.obj_func(position) if minmax == 0 else 1.0 / (self.obj_func(position) + self.EPSILON)

        def get_sorted_pop_and_global_best_solution(self, pop=None, id_fit=None, id_best=None):
            """ Sort the population and return it together with the best solution """
            sorted_pop = sorted(pop, key=lambda temp: temp[id_fit])
            return sorted_pop, deepcopy(sorted_pop[id_best])

        def amend_position(self, position=None):
            return clip(position, self.lb, self.ub)

        def amend_position_random(self, position=None):
            return where(logical_and(self.lb <= position, position <= self.ub), position, uniform(self.lb, self.ub))

        def update_sorted_population_and_global_best_solution(self, pop=None, id_best=None, g_best=None):
            """ Sort the population and update the current best solution. Return the sorted population and the new best. """
            sorted_pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
            current_best = sorted_pop[id_best]
            g_best = deepcopy(current_best) if current_best[self.ID_FIT] < g_best[self.ID_FIT] else deepcopy(g_best)
            return sorted_pop, g_best
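
As a quick toy check (my own example, not from the original post), Root can be instantiated directly with a simple objective. Note that for maximization problems get_fitness_position returns 1/(f + EPSILON), so sorting by ascending fitness works in both cases:

    import numpy as np

    # Hypothetical toy objective: the sphere function, with minimum 0 at the origin
    root = Root(obj_func=lambda x: float(np.sum(x ** 2)), lb=[-5], ub=[5], problem_size=3)
    pos, fit = root.create_solution()        # random position in [-5, 5]^3 plus its fitness
    print(pos, fit)
    print(root.amend_position(pos * 10))     # out-of-bound values are clipped back into [-5, 5]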

Below are the classes for two different variants of the SMA algorithm.

    class BaseSMA(Root):
        """
        Modified version of: Slime Mould Algorithm (SMA)
        (Slime Mould Algorithm: A New Method for Stochastic Optimization)
        Notes:
            + Selects 2 unique random solutions to create the new solution (instead of per variable) --> removes the third loop of the original version
            + Checks bounds and updates fitness after each individual move, instead of after the whole population has moved as in the original version
        """
        ID_WEI = 2

        def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, verbose=True, epoch=750, pop_size=100, z=0.03):
            Root.__init__(self, obj_func, lb, ub, problem_size, verbose)
            self.epoch = epoch
            self.pop_size = pop_size
            self.z = z

        def create_solution(self, minmax=0):
            pos = uniform(self.lb, self.ub)
            fit = self.get_fitness_position(pos)
            weight = zeros(self.problem_size)
            return [pos, fit, weight]

        def train(self):
            pop = [self.create_solution() for _ in range(self.pop_size)]
            pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)  # Eq.(2.6)
            for epoch in range(self.epoch):
                s = pop[0][self.ID_FIT] - pop[-1][self.ID_FIT] + self.EPSILON  # plus eps to avoid a zero denominator
                # calculate the fitness weight of each slime mould
                for i in range(0, self.pop_size):
                    # Eq.(2.5)
                    if i <= int(self.pop_size / 2):
                        pop[i][self.ID_WEI] = 1 + uniform(0, 1, self.problem_size) * log10((pop[0][self.ID_FIT] - pop[i][self.ID_FIT]) / s + 1)
                    else:
                        pop[i][self.ID_WEI] = 1 - uniform(0, 1, self.problem_size) * log10((pop[0][self.ID_FIT] - pop[i][self.ID_FIT]) / s + 1)
                a = arctanh(-((epoch + 1) / self.epoch) + 1)  # Eq.(2.4)
                b = 1 - (epoch + 1) / self.epoch
                # Update the position of each search agent
                for i in range(0, self.pop_size):
                    if uniform() < self.z:  # Eq.(2.7)
                        pos_new = uniform(self.lb, self.ub)
                    else:
                        p = tanh(abs(pop[i][self.ID_FIT] - g_best[self.ID_FIT]))  # Eq.(2.2)
                        vb = uniform(-a, a, self.problem_size)  # Eq.(2.3)
                        vc = uniform(-b, b, self.problem_size)
                        # two positions randomly selected from the population, applied to the whole problem size instead of 1 variable
                        id_a, id_b = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
                        pos_1 = g_best[self.ID_POS] + vb * (pop[i][self.ID_WEI] * pop[id_a][self.ID_POS] - pop[id_b][self.ID_POS])
                        pos_2 = vc * pop[i][self.ID_POS]
                        pos_new = where(uniform(0, 1, self.problem_size) < p, pos_1, pos_2)
                    # Check bounds and re-calculate fitness after each individual move
                    pos_new = self.amend_position(pos_new)
                    fit_new = self.get_fitness_position(pos_new)
                    pop[i][self.ID_POS] = pos_new
                    pop[i][self.ID_FIT] = fit_new
                # Sort the population and update the global best
                pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
                self.loss_train.append(g_best[self.ID_FIT])
                if self.verbose:
                    print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
            self.solution = g_best
            return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train


    class OriginalSMA(Root):
        """
        The original version of: Slime Mould Algorithm (SMA)
        (Slime Mould Algorithm: A New Method for Stochastic Optimization)
        Link:
            https://doi.org/10.1016/j.future.2020.03.055
        """
        ID_WEI = 2

        def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, verbose=True, epoch=750, pop_size=100, z=0.03):
            Root.__init__(self, obj_func, lb, ub, problem_size, verbose)
            self.epoch = epoch
            self.pop_size = pop_size
            self.z = z

        def create_solution(self, minmax=0):
            pos = uniform(self.lb, self.ub)
            fit = self.get_fitness_position(pos)
            weight = zeros(self.problem_size)
            return [pos, fit, weight]

        def train(self):
            pop = [self.create_solution() for _ in range(self.pop_size)]
            pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)  # Eq.(2.6)
            for epoch in range(self.epoch):
                s = pop[0][self.ID_FIT] - pop[-1][self.ID_FIT] + self.EPSILON  # plus eps to avoid a zero denominator
                # calculate the fitness weight of each slime mould
                for i in range(0, self.pop_size):
                    # Eq.(2.5)
                    if i <= int(self.pop_size / 2):
                        pop[i][self.ID_WEI] = 1 + uniform(0, 1, self.problem_size) * log10((pop[0][self.ID_FIT] - pop[i][self.ID_FIT]) / s + 1)
                    else:
                        pop[i][self.ID_WEI] = 1 - uniform(0, 1, self.problem_size) * log10((pop[0][self.ID_FIT] - pop[i][self.ID_FIT]) / s + 1)
                a = arctanh(-((epoch + 1) / self.epoch) + 1)  # Eq.(2.4)
                b = 1 - (epoch + 1) / self.epoch
                # Update the position of each search agent
                for i in range(0, self.pop_size):
                    if uniform() < self.z:  # Eq.(2.7)
                        pop[i][self.ID_POS] = uniform(self.lb, self.ub)
                    else:
                        p = tanh(abs(pop[i][self.ID_FIT] - g_best[self.ID_FIT]))  # Eq.(2.2)
                        vb = uniform(-a, a, self.problem_size)  # Eq.(2.3)
                        vc = uniform(-b, b, self.problem_size)
                        for j in range(0, self.problem_size):
                            # two positions randomly selected from the population
                            id_a, id_b = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
                            if uniform() < p:  # Eq.(2.1)
                                pop[i][self.ID_POS][j] = g_best[self.ID_POS][j] + vb[j] * (
                                    pop[i][self.ID_WEI][j] * pop[id_a][self.ID_POS][j] - pop[id_b][self.ID_POS][j])
                            else:
                                pop[i][self.ID_POS][j] = vc[j] * pop[i][self.ID_POS][j]
                # Check bounds and re-calculate fitness after the whole population has moved
                for i in range(0, self.pop_size):
                    pos_new = self.amend_position(pop[i][self.ID_POS])
                    fit_new = self.get_fitness_position(pos_new)
                    pop[i][self.ID_POS] = pos_new
                    pop[i][self.ID_FIT] = fit_new
                # Sort the population and update the global best
                pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
                self.loss_train.append(g_best[self.ID_FIT])
                if self.verbose:
                    print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
            self.solution = g_best
            return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
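
For reference, the position update that both classes implement (matching the Eq. numbers in the code comments; $t$ is the current epoch, $T$ the total number of epochs, $X_b$ the global best position, $S(i)$ the fitness of agent $i$, and $DF$ the best fitness found so far) can be written as:

$$
X(t+1) = \begin{cases}
\mathrm{rand}\cdot(UB-LB) + LB, & \mathrm{rand} < z \\
X_b(t) + v_b \cdot \big(W \cdot X_A(t) - X_B(t)\big), & r < p \\
v_c \cdot X(t), & r \ge p
\end{cases}
$$

where $p = \tanh\lvert S(i) - DF\rvert$, $v_b \sim U(-a, a)$ with $a = \operatorname{arctanh}\!\left(1 - \tfrac{t}{T}\right)$, $v_c \sim U(-b, b)$ with $b = 1 - \tfrac{t}{T}$, and the fitness weight is

$$
W = 1 \pm r \cdot \log_{10}\!\left(\frac{bF - S(i)}{bF - wF + \epsilon} + 1\right),
$$

with $+$ for the better half of the population and $-$ for the worse half; $bF$ and $wF$ are the best and worst fitness of the current epoch, and $X_A$, $X_B$ are two randomly chosen agents.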

Define the problem parameters. You can define any problem you want to optimize; here we search for the minimum.

    # from SMA import BaseSMA, OriginalSMA
    # from numpy import sum, pi, exp, sqrt, cos
    import numpy as np

    ## You can create whatever function you want here
    def func_sum(solution):
        return np.sum(solution ** 2)

    def func_ackley(solution):
        a, b, c = 20, 0.2, 2 * np.pi
        d = len(solution)
        sum_1 = -a * np.exp(-b * np.sqrt(np.sum(solution ** 2) / d))
        sum_2 = np.exp(np.sum(np.cos(c * solution)) / d)
        return sum_1 - sum_2 + a + np.exp(1)

    ## You can create a different bound for each dimension like this
    # lb = [-15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3, -100, -40, -50]
    # ub = [15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3, 20, 200, 1000]
    # problem_size = 18
    ## if you choose this way, problem_size needs to be the same length as lb and ub

    ## Or use the same bound for all dimensions like this
    lb = [-100]
    ub = [100]
    problem_size = 100
    ## if you choose this way, problem_size can be anything you want

    ## Setting parameters
    obj_func = func_ackley
    verbose = True
    epoch = 100
    pop_size = 50
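
The Ackley function has its global minimum of 0 at the origin, which gives a quick way to sanity-check the objective before running the optimizer (my own check, not part of the original post):

    # At the origin, Ackley should return ~0 (up to floating-point error)
    print(func_ackley(np.zeros(problem_size)))  # about 4.4e-16
    print(func_sum(np.zeros(problem_size)))     # 0.0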

Now let's test both SMA variants.

First, the basic SMA:

    md1 = BaseSMA(obj_func, lb, ub, problem_size, verbose, epoch, pop_size)
    best_pos1, best_fit1, list_loss1 = md1.train()
    # returns: the global best position, the fitness of the global best, and the loss of the training process at each epoch/iteration
    print(md1.solution[0])
    print(md1.solution[1])
    # print(md1.loss_train)

With verbose on, the best loss of each iteration is printed; md1.solution[0] prints the optimal X, and md1.solution[1] prints the optimal y value.

md1.loss_train returns the loss y at each iteration as a list, which is convenient for plotting and similar analysis.
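
For instance, a minimal convergence plot might look like this (assuming matplotlib is available; this sketch is not part of the original post):

    import matplotlib.pyplot as plt

    # loss_train holds the best fitness found so far at each epoch
    plt.plot(md1.loss_train)
    plt.xlabel("Epoch")
    plt.ylabel("Best fitness")
    plt.title("BaseSMA convergence on the Ackley function")
    plt.show()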


Second, the original SMA:

    md2 = OriginalSMA(obj_func, lb, ub, problem_size, verbose, epoch, pop_size)
    best_pos2, best_fit2, list_loss2 = md2.train()
    # returns: the global best position, the fitness of the global best, and the loss of the training process at each epoch/iteration
    print(best_pos2)
    print(best_fit2)
    # print(list_loss2)

The return values are much the same, but this SMA takes somewhat longer to run, presumably because it does more work: it updates each of the problem_size dimensions one at a time and draws a fresh pair of random agents for every dimension.
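
To put a number on that, you could time the two variants with the standard library (a rough sketch; results depend on the machine and on z, epoch, and pop_size):

    import time

    for cls in (BaseSMA, OriginalSMA):
        start = time.perf_counter()
        cls(obj_func, lb, ub, problem_size, False, epoch, pop_size).train()  # verbose=False
        print("{} took {:.2f} s".format(cls.__name__, time.perf_counter() - start))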


If you are interested in the underlying theory, the link below points to the paper that proposed the algorithm.

Paper: https://doi.org/10.1016/j.future.2020.03.055

Original article: https://blog.csdn.net/weixin_46277779/article/details/126826305