• Reinforcement Learning Case Reproduction (1) --- Q-learning on MountainCar


    1 Setting up the environment

    1.1 Using the environment built into gym

    import gym

    # Create the environment (render_mode="human" is required for rendering in gym 0.26+)
    env = gym.make("MountainCar-v0", render_mode="human")
    episodes = 10
    for episode in range(episodes):
        obs, info = env.reset()
        done = False
        rewards = 0
        while not done:
            # Sample a random action
            action = env.action_space.sample()
            obs, reward, terminated, truncated, info = env.step(action)
            done = terminated or truncated
            env.render()
            rewards += reward
        print(rewards)
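
    For reference, MountainCar-v0's observation is a two-element vector [position, velocity] and the action space has three discrete actions; the spaces the random loop above samples from can be checked directly:

    import gym

    env = gym.make("MountainCar-v0")
    # Observation: Box with position in [-1.2, 0.6] and velocity in [-0.07, 0.07]
    print(env.observation_space)
    # Actions: Discrete(3) -- 0 = push left, 1 = no push, 2 = push right
    print(env.action_space)
    env.close()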

    1.2 Building the environment yourself (recommended)

    Build the MountainCar environment by following the previous article:

    Previous article: Reinforcement Learning in Practice (3): Building Your Own Environment with gym (runs on gym 0.26.2) - CSDN blog
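
    The training and test scripts below create this environment with gym.make("GridWorld-v0"), the id under which the hand-built MountainCar clone is registered. A minimal registration sketch is shown here; the module name gridworld_env and class name MountainCarEnv are placeholders, not taken from the referenced article, so substitute your own:

    import gym
    from gym.envs.registration import register

    # Register the custom MountainCar environment under the id used in sections 2 and 3.
    # "gridworld_env:MountainCarEnv" (module:ClassName) is a placeholder for your own code.
    register(
        id="GridWorld-v0",
        entry_point="gridworld_env:MountainCarEnv",
        max_episode_steps=200,  # same step limit as the built-in MountainCar-v0
    )

    env = gym.make("GridWorld-v0")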

     

    2. Training the model with Q-learning

    import gym
    import numpy as np

    # "GridWorld-v0" is the custom MountainCar environment registered in section 1.2
    env = gym.make("GridWorld-v0")

    # Q-Learning settings
    LEARNING_RATE = 0.1  # learning rate
    DISCOUNT = 0.95      # reward discount factor
    EPISODES = 100       # number of training episodes
    SHOW_EVERY = 1000    # render every SHOW_EVERY episodes

    # Exploration settings
    epsilon = 1  # not a constant, going to be decayed
    START_EPSILON_DECAYING = 1
    END_EPSILON_DECAYING = EPISODES // 2
    epsilon_decay_value = epsilon / (END_EPSILON_DECAYING - START_EPSILON_DECAYING)

    DISCRETE_OS_SIZE = [20, 20]
    discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE
    print(discrete_os_win_size)

    def get_discrete_state(state):
        discrete_state = (state - env.observation_space.low) / discrete_os_win_size
        # we use this tuple to look up the 3 Q values for the available actions in the q-table
        return tuple(discrete_state.astype(np.int64))

    q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))

    for episode in range(EPISODES):
        state = env.reset()
        discrete_state = get_discrete_state(state)
        if episode % SHOW_EVERY == 0:
            render = True
            print(episode)
        else:
            render = False
        done = False
        while not done:
            if np.random.random() > epsilon:
                # Get action from Q table
                action = np.argmax(q_table[discrete_state])
            else:
                # Get random action
                action = np.random.randint(0, env.action_space.n)
            new_state, reward, done, truncated, info = env.step(action)
            new_discrete_state = get_discrete_state(new_state)
            # If simulation did not end yet after last step - update Q table
            if not done:
                # Maximum possible Q value in next step (for new state)
                max_future_q = np.max(q_table[new_discrete_state])
                # Current Q value (for current state and performed action)
                current_q = q_table[discrete_state + (action,)]
                # And here's our equation for a new Q value for the current state and action
                new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
                # Update Q table with new Q value
                q_table[discrete_state + (action,)] = new_q
            # Simulation ended (for any reason) - if goal position is achieved, update Q value directly
            elif new_state[0] >= env.goal_position:
                # q_table[discrete_state + (action,)] = reward
                q_table[discrete_state + (action,)] = 0
                print("we made it on episode {}".format(episode))
            discrete_state = new_discrete_state
            if render:
                env.render()
        # Decaying is being done every episode if episode number is within decaying range
        if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
            epsilon -= epsilon_decay_value

    np.save("q_table.npy", arr=q_table)
    env.close()
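
    The new_q assignment in the loop above is the standard tabular Q-learning update, with learning rate \alpha = LEARNING_RATE and discount factor \gamma = DISCOUNT:

    Q(s, a) \leftarrow (1 - \alpha)\, Q(s, a) + \alpha \left( r + \gamma \max_{a'} Q(s', a') \right)

    Assuming the custom environment keeps the same observation bounds as MountainCar-v0 (position in [-1.2, 0.6], velocity in [-0.07, 0.07]), the 20x20 discretization gives discrete_os_win_size = [0.09, 0.007], so get_discrete_state maps a continuous (position, velocity) pair to one cell of the 20x20x3 Q table.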

    3. Testing the model

    import gym
    import numpy as np

    env = gym.make("GridWorld-v0")

    # Q-Learning settings
    LEARNING_RATE = 0.1
    DISCOUNT = 0.95
    EPISODES = 10

    DISCRETE_OS_SIZE = [20, 20]
    discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

    def get_discrete_state(state):
        discrete_state = (state - env.observation_space.low) / discrete_os_win_size
        # we use this tuple to look up the 3 Q values for the available actions in the q-table
        return tuple(discrete_state.astype(np.int64))

    # Load the Q table trained in section 2
    q_table = np.load(file="q_table.npy")

    for episode in range(EPISODES):
        state = env.reset()
        discrete_state = get_discrete_state(state)
        rewards = 0
        done = False
        while not done:
            # Get action from Q table (greedy policy, no exploration)
            action = np.argmax(q_table[discrete_state])
            new_state, reward, done, truncated, info = env.step(action)
            new_discrete_state = get_discrete_state(new_state)
            rewards += reward
            # If the episode ended at the goal position, report the result
            if done and new_state[0] >= env.goal_position:
                print("we made it on episode {}, rewards {}".format(episode, rewards))
            discrete_state = new_discrete_state
            env.render()

    env.close()
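
    Once training has produced q_table.npy, the saved table can also be sanity-checked before running the test loop above; a minimal sketch, assuming the custom environment has the same three actions as MountainCar-v0:

    import numpy as np

    # Load the table saved by the training script in section 2
    q_table = np.load("q_table.npy")
    print(q_table.shape)  # (20, 20, 3): 20x20 discretized states x 3 actions
    # Greedy action in one example cell (0 = push left, 1 = no push, 2 = push right)
    print(np.argmax(q_table[10, 10]))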

  • Original article: https://blog.csdn.net/weixin_48878618/article/details/133842150