
[Python] A Fresh Python Implementation of the Marine Predators Algorithm


The Marine Predators Algorithm was published in 2020 in the paper "Marine Predators Algorithm: A nature-inspired metaheuristic".

The authors only provide MATLAB code with the original paper, and no Python version seems to be available online, so I rewrote the MATLAB code in Python myself.

  1. """
  2. 2020海洋捕食者算法
  3. """
  4. import numpy as np
  5. import random
  6. import math
  7. def initial(pop, dim, ub, lb):
  8. X = np.zeros([pop, dim])
  9. for i in range(pop):
  10. for j in range(dim):
  11. X[i, j] = random.random() * (ub[j] - lb[j]) + lb[j] # 均匀分布随机初始化
  12. return X, lb, ub
  13. # 将超过边界的直接用边界值赋值
  14. def BorderCheckForOne(x, ub, lb, pop, dim):
  15. if x > ub[0]:
  16. x = ub[0]
  17. elif x < lb[0]:
  18. x = lb[0]
  19. return x
  20. def levy(n, m, beta):
  21. num = math.gamma(1+beta)*math.sin(math.pi*beta/2)
  22. den = math.gamma((1+beta)/2) * beta * 2**((beta-1)/2)
  23. sigma_u = (num/den)**(1/beta)
  24. u = np.random.normal(0,sigma_u,(n,m))
  25. v = np.random.normal(0,1,(n,m))
  26. return u/(np.abs(v)**(1/beta)) ## ^的用法好像有错
  27. def MPA(pop, dim, lb, ub, MaxIter, fun):
  28. Top_predator_pos = np.zeros(dim) #或者np.zeros([1,dim])
  29. Top_predator_fit = float("inf")
  30. Convergence_curve = np.zeros(MaxIter)
  31. stepsize = np.zeros([pop, dim]) # pop×dim
  32. fitness = np.inf * np.ones([pop, 1]) # pop×1
  33. # 初始化种群
  34. X, lb, ub = initial(pop, dim, ub, lb)
  35. Xmin = lb[0] * np.ones([pop, dim])
  36. Xmax = ub[0] * np.ones([pop, dim])
  37. Iter = 0
  38. FADs = 0.2
  39. P = 0.5
  40. while Iter < MaxIter:
  41. # =================== 对上一轮的进行复盘 ============
  42. for i in range(0, pop):
  43. # 1.边界检测
  44. for j in range(0, dim):
  45. X[i, j] = BorderCheckForOne(X[i, j], ub, lb, pop, dim)
  46. # 2.计算每个鲨鱼的适应度值
  47. fitness[i, 0] = fun(X[i, :])
  48. if fitness[i, 0] < Top_predator_fit: # 23个基准函数都是越小越好
  49. Top_predator_fit = fitness[i, 0].copy()
  50. Top_predator_pos = X[i, :].copy()
  51. # =================== Memory saving ===============
  52. if Iter == 0:
  53. fit_old = fitness.copy()
  54. X_old = X.copy()
  55. for i in range(pop):
  56. if fit_old[i, 0] < fitness[i, 0]:
  57. fitness[i, 0] = fit_old[i, 0].copy() # 如果上一轮的位置更好,还是用上一轮的
  58. X[i, :] = X_old[i, :].copy()
  59. fit_old = fitness.copy()
  60. X_old = X.copy()
  61. # =================== Levy=======
  62. Elite = np.ones([pop, 1]) * Top_predator_pos
  63. CF = (1-Iter/MaxIter)**(2*Iter/MaxIter)
  64. RL=0.05*levy(pop, dim, 1.5) # levy返回一个pop×dim的矩阵
  65. RB = np.random.randn(pop, dim) # 满足正态分布的pop×dim大小矩阵
  66. # ===============遍历每个个体==============
  67. for i in range(pop):
  68. for j in range(dim):
  69. R = random.random()
  70. # ================公式12============
  71. if Iter < MaxIter/3:
  72. stepsize[i, j] = RB[i, j] * ( Elite[i, j]-RB[i, j]*X[i, j] )
  73. X[i, j] = X[i, j] + P*R*stepsize[i, j]
  74. # ===============公式1314=======
  75. elif Iter>MaxIter/3 and Iter < 2*MaxIter/3:
  76. if i > pop/2:
  77. stepsize[i, j] = RB[i, j] * (RB[i, j]*Elite[i, j]-X[i, j])
  78. X[i, j] = Elite[i, j] + P*CF*stepsize[i, j]
  79. else:
  80. stepsize[i, j] = RL[i, j] * (Elite[i, j]-RL[i, j]*X[i, j])
  81. X[i, j] = X[i, j] + P * R *stepsize[i, j]
  82. # ==============公式15==============
  83. else:
  84. stepsize[i, j] = RL[i, j]*( RL[i, j]*Elite[i, j]-X[i, j])
  85. X[i, j] = Elite[i, j] + P*CF*stepsize[i, j]
  86. # =================== 对上一轮的进行复盘 ============
  87. for i in range(0, pop):
  88. # 1.边界检测
  89. for j in range(0, dim):
  90. X[i, j] = BorderCheckForOne(X[i, j], ub, lb, pop, dim)
  91. # 2.计算每个鲨鱼的适应度值
  92. fitness[i, 0] = fun(X[i, :])
  93. if fitness[i, 0] < Top_predator_fit: # 23个基准函数都是越小越好
  94. Top_predator_fit = fitness[i, 0].copy()
  95. Top_predator_pos = X[i, :].copy()
  96. # =================== Memory saving ===============
  97. if Iter == 0:
  98. fit_old = fitness.copy()
  99. X_old = X.copy()
  100. for i in range(pop):
  101. if fit_old[i, 0] < fitness[i, 0]:
  102. fitness[i, 0] = fit_old[i, 0].copy() # 如果上一轮的位置更好,还是用上一轮的
  103. X[i, :] = X_old[i, :].copy()
  104. fit_old = fitness.copy()
  105. X_old = X.copy()
  106. # =====================对整体进行一个更新(公式16=====
  107. if random.random() < FADs:
  108. U = (np.random.rand(pop, dim) < FADs)
  109. X = X + CF*np.multiply(Xmin + np.multiply(np.random.rand(pop, dim), (Xmax-Xmin)), U)
  110. else:
  111. r = random.random()
  112. stepsize = (FADs*(1-r)+r) * (X[random.sample(range(0, pop), pop),:] - X[random.sample(range(0, pop), pop),:])
  113. X = X + stepsize
  114. Iter = Iter+1
  115. if Iter!=MaxIter:
  116. Convergence_curve[Iter] = Top_predator_fit
  117. return Top_predator_fit, Top_predator_pos, Convergence_curve
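
For reference, here is a minimal usage sketch. It is not from the original post: the objective, bounds, population size and iteration count below are my own assumptions, with the sphere function standing in for fun 1 of the benchmark set.

import numpy as np

def sphere(x):
    # F1 of the classical benchmark set: sum of squares, minimum 0 at the origin
    return float(np.sum(x ** 2))

dim = 30
lb = [-100.0] * dim      # per-dimension lower bounds (assumed)
ub = [100.0] * dim       # per-dimension upper bounds (assumed)
pop, MaxIter = 30, 500   # assumed settings, not necessarily those used for the results below

best_fit, best_pos, curve = MPA(pop, dim, lb, ub, MaxIter, sphere)
print("best fitness:", best_fit)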

I ran the implementation on the 23 classical benchmark functions to verify that the code is correct; the per-function averages over 4 runs are listed below, followed by a sketch of the test loop.

fun 1 ---- average over 4 runs: 1.590879014464718e-22
fun 2 ---- average over 4 runs: 3.1015801972813803e-13
fun 3 ---- average over 4 runs: 2.1687101928786233e-05
fun 4 ---- average over 4 runs: 2.738516688049143e-09
fun 5 ---- average over 4 runs: 24.3651022631242
fun 6 ---- average over 4 runs: 1.5518969799868655e-08
fun 7 ---- average over 4 runs: 0.0007603777498045276
fun 8 ---- average over 4 runs: -9759.428902632117
fun 9 ---- average over 4 runs: 0.0
fun 10 ---- average over 4 runs: 1.1923795284474181e-12
fun 11 ---- average over 4 runs: 0.0
fun 12 ---- average over 4 runs: 9.427489581332269e-10
fun 13 ---- average over 4 runs: 2.018121184109257e-08
fun 14 ---- average over 4 runs: 0.9980038377944498
fun 15 ---- average over 4 runs: 0.00030748598780886593
fun 16 ---- average over 4 runs: -1.0316284534898776
fun 17 ---- average over 4 runs: 0.39788735772973816
fun 18 ---- average over 4 runs: 2.999999999999924
fun 19 ---- average over 4 runs: -3.862782147820756
fun 20 ---- average over 4 runs: -3.3219951715813822
fun 21 ---- average over 4 runs: -10.153199679022137
fun 22 ---- average over 4 runs: -10.40294056677283
fun 23 ---- average over 4 runs: -10.53640981666291
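
The averages above come from 4 independent runs per function. A hedged sketch of such a test loop is shown below; get_benchmark is a hypothetical helper that would return the objective, dimension and bounds for benchmark k, and the population size and iteration count are assumptions, not the settings behind the numbers above.

import numpy as np

runs = 4
for k in range(1, 24):
    fun, dim, lb, ub = get_benchmark(k)   # hypothetical helper: returns the F1..F23 definition
    best_fits = [MPA(30, dim, lb, ub, 500, fun)[0] for _ in range(runs)]
    print("fun", k, "---- average over", runs, "runs:", np.mean(best_fits))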
 
