The DDPG algorithm is implemented in Tensorflow 2 for the Pendulum-v0 environment provided by OpenAI Gym.
The training results are shown below. Because DDPG is an off-policy method, it is far more data-efficient than the on-policy A2C; training converged within about 200 episodes.
The following shows the motion of the pendulum after training is complete.
The DDPG code consists of ddpg_learn.py, which implements and trains the actor-critic networks; ddpg_main.py, which runs the training; ddpg_load_play.py, which loads the trained network parameters and runs the agent; and replaybuffer.py, which implements the replay buffer.
The overall code structure is as follows.
Below is the Tensorflow 2 code.
ddpg_learn.py
# DDPG Actor (tf2 subclassing version: using chain rule to train Actor)
# coded by St.Watermelon

import numpy as np
import matplotlib.pyplot as plt

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, concatenate
from tensorflow.keras.optimizers import Adam
import tensorflow as tf

from replaybuffer import ReplayBuffer


# actor network
class Actor(Model):

    def __init__(self, action_dim, action_bound):
        super(Actor, self).__init__()

        self.action_bound = action_bound

        self.h1 = Dense(64, activation='relu')
        self.h2 = Dense(32, activation='relu')
        self.h3 = Dense(16, activation='relu')
        self.action = Dense(action_dim, activation='tanh')


    def call(self, state):
        x = self.h1(state)
        x = self.h2(x)
        x = self.h3(x)
        a = self.action(x)

        # Scale output to [-action_bound, action_bound]
        a = Lambda(lambda x: x*self.action_bound)(a)

        return a
# critic network
class Critic(Model):

    def __init__(self):
        super(Critic, self).__init__()

        self.x1 = Dense(32, activation='relu')
        self.a1 = Dense(32, activation='relu')
        self.h2 = Dense(32, activation='relu')
        self.h3 = Dense(16, activation='relu')
        self.q = Dense(1, activation='linear')


    def call(self, state_action):
        state = state_action[0]
        action = state_action[1]
        x = self.x1(state)
        a = self.a1(action)
        h = concatenate([x, a], axis=-1)
        x = self.h2(h)
        x = self.h3(x)
        q = self.q(x)
        return q
class DDPGagent(object):

    def __init__(self, env):

        ## hyperparameters
        self.GAMMA = 0.95
        self.BATCH_SIZE = 32
        self.BUFFER_SIZE = 20000
        self.ACTOR_LEARNING_RATE = 0.0001
        self.CRITIC_LEARNING_RATE = 0.001
        self.TAU = 0.001

        self.env = env
        # get state dimension
        self.state_dim = env.observation_space.shape[0]
        # get action dimension
        self.action_dim = env.action_space.shape[0]
        # get action bound
        self.action_bound = env.action_space.high[0]

        ## create actor and critic networks
        self.actor = Actor(self.action_dim, self.action_bound)
        self.target_actor = Actor(self.action_dim, self.action_bound)

        self.critic = Critic()
        self.target_critic = Critic()

        self.actor.build(input_shape=(None, self.state_dim))
        self.target_actor.build(input_shape=(None, self.state_dim))

        state_in = Input((self.state_dim,))
        action_in = Input((self.action_dim,))
        self.critic([state_in, action_in])
        self.target_critic([state_in, action_in])

        self.actor.summary()
        self.critic.summary()

        # optimizer
        self.actor_opt = Adam(self.ACTOR_LEARNING_RATE)
        self.critic_opt = Adam(self.CRITIC_LEARNING_RATE)

        ## initialize replay buffer
        self.buffer = ReplayBuffer(self.BUFFER_SIZE)

        # save the results
        self.save_epi_reward = []


    ## transfer actor and critic weights to the target networks with a tau
    def update_target_network(self, TAU):
        theta = self.actor.get_weights()
        target_theta = self.target_actor.get_weights()
        for i in range(len(theta)):
            target_theta[i] = TAU * theta[i] + (1 - TAU) * target_theta[i]
        self.target_actor.set_weights(target_theta)

        phi = self.critic.get_weights()
        target_phi = self.target_critic.get_weights()
        for i in range(len(phi)):
            target_phi[i] = TAU * phi[i] + (1 - TAU) * target_phi[i]
        self.target_critic.set_weights(target_phi)
    ## single gradient update on a single batch of data
    def critic_learn(self, states, actions, td_targets):
        with tf.GradientTape() as tape:
            q = self.critic([states, actions], training=True)
            loss = tf.reduce_mean(tf.square(q-td_targets))

        grads = tape.gradient(loss, self.critic.trainable_variables)
        self.critic_opt.apply_gradients(zip(grads, self.critic.trainable_variables))


    ## train the actor network
    def actor_learn(self, states):
        with tf.GradientTape() as tape:
            actions = self.actor(states, training=True)
            critic_q = self.critic([states, actions])
            loss = -tf.reduce_mean(critic_q)

        grads = tape.gradient(loss, self.actor.trainable_variables)
        self.actor_opt.apply_gradients(zip(grads, self.actor.trainable_variables))


    ## Ornstein-Uhlenbeck noise
    def ou_noise(self, x, rho=0.15, mu=0, dt=1e-1, sigma=0.2, dim=1):
        # Euler discretization: x_{t+1} = x_t + rho*(mu - x_t)*dt + sigma*sqrt(dt)*N(0,1)
        return x + rho*(mu - x)*dt + sigma*np.sqrt(dt)*np.random.normal(size=dim)


    ## computing TD target: y_k = r_k + gamma*Q(x_k+1, u_k+1)
    def td_target(self, rewards, q_values, dones):
        y_k = np.asarray(q_values)
        for i in range(q_values.shape[0]):  # number of batch
            if dones[i]:
                y_k[i] = rewards[i]
            else:
                y_k[i] = rewards[i] + self.GAMMA * q_values[i]
        return y_k


    ## load actor and critic weights
    def load_weights(self, path):
        self.actor.load_weights(path + 'pendulum_actor.h5')
        self.critic.load_weights(path + 'pendulum_critic.h5')
    ## train the agent
    def train(self, max_episode_num):

        # initial transfer of model weights to the target networks
        self.update_target_network(1.0)

        for ep in range(int(max_episode_num)):

            # reset OU noise
            pre_noise = np.zeros(self.action_dim)
            # reset episode
            time, episode_reward, done = 0, 0, False
            # reset the environment and observe the first state
            state = self.env.reset()

            while not done:
                # visualize the environment
                #self.env.render()
                # pick an action: shape = (1,)
                action = self.actor(tf.convert_to_tensor([state], dtype=tf.float32))
                action = action.numpy()[0]
                noise = self.ou_noise(pre_noise, dim=self.action_dim)
                # clip continuous action to be within action_bound
                action = np.clip(action + noise, -self.action_bound, self.action_bound)
                # observe reward, new_state
                next_state, reward, done, _ = self.env.step(action)
                # rescale the reward (Pendulum-v0 rewards lie in roughly [-16.3, 0])
                # and add the transition to the replay buffer
                train_reward = (reward + 8) / 8
                self.buffer.add_buffer(state, action, train_reward, next_state, done)

                if self.buffer.buffer_count() > 1000:  # start training after the buffer has enough samples
                    # sample transitions from replay buffer
                    states, actions, rewards, next_states, dones = self.buffer.sample_batch(self.BATCH_SIZE)

                    # predict target Q-values
                    target_qs = self.target_critic([tf.convert_to_tensor(next_states, dtype=tf.float32),
                                                    self.target_actor(
                                                        tf.convert_to_tensor(next_states, dtype=tf.float32))])

                    # compute TD targets
                    y_i = self.td_target(rewards, target_qs.numpy(), dones)

                    # train critic using sampled batch
                    self.critic_learn(tf.convert_to_tensor(states, dtype=tf.float32),
                                      tf.convert_to_tensor(actions, dtype=tf.float32),
                                      tf.convert_to_tensor(y_i, dtype=tf.float32))

                    # train actor
                    self.actor_learn(tf.convert_to_tensor(states, dtype=tf.float32))

                    # update both target networks
                    self.update_target_network(self.TAU)

                # update current state
                pre_noise = noise
                state = next_state
                episode_reward += reward
                time += 1

            ## display rewards every episode
            print('Episode: ', ep+1, 'Time: ', time, 'Reward: ', episode_reward)

            self.save_epi_reward.append(episode_reward)

            ## save weights every episode
            #print('Now save')
            self.actor.save_weights("./save_weights/pendulum_actor.h5")
            self.critic.save_weights("./save_weights/pendulum_critic.h5")

        ## save the episode rewards to file when training is done
        np.savetxt('./save_weights/pendulum_epi_reward.txt', self.save_epi_reward)
        print(self.save_epi_reward)


    ## plot episode rewards
    def plot_result(self):
        plt.plot(self.save_epi_reward)
        plt.show()
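For reference, the updates performed in critic_learn, actor_learn, td_target, and update_target_network above are the standard DDPG updates, summarized here for a sampled mini-batch of N transitions, with actor parameters $\theta$, critic parameters $\phi$, and target parameters $\theta'$, $\phi'$:

$$ y_i = r_i + \gamma \, Q_{\phi'}\big(x_{i+1}, \mu_{\theta'}(x_{i+1})\big) \quad (\text{or } y_i = r_i \text{ for a terminal transition}) $$
$$ L(\phi) = \frac{1}{N}\sum_i \big(Q_\phi(x_i, u_i) - y_i\big)^2, \qquad J(\theta) = -\frac{1}{N}\sum_i Q_\phi\big(x_i, \mu_\theta(x_i)\big) $$
$$ \theta' \leftarrow \tau\,\theta + (1-\tau)\,\theta', \qquad \phi' \leftarrow \tau\,\phi + (1-\tau)\,\phi' $$

Here $\gamma$ is GAMMA (0.95) and $\tau$ is TAU (0.001) from the hyperparameters above.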
ddpg_main.py
# DDPG main (tf2 subclassing API version)
# coded by St.Watermelon

import gym
from ddpg_learn import DDPGagent

def main():

    max_episode_num = 200
    env = gym.make("Pendulum-v0")
    agent = DDPGagent(env)

    agent.train(max_episode_num)

    agent.plot_result()

if __name__=="__main__":
    main()
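Note that the code assumes the classic Gym API, where env.reset() returns only the state and env.step() returns a 4-tuple. On more recent gym releases the environment id was renamed to Pendulum-v1, so the sketch below (not part of the original files, and assuming a gym version that still exposes the classic reset/step signatures) only changes the environment id; verify the behavior of your installed gym version.

# Minimal sketch, assuming a newer gym where only the environment id changed.
import gym
from ddpg_learn import DDPGagent

def main():
    env = gym.make("Pendulum-v1")   # renamed id in newer gym releases
    agent = DDPGagent(env)
    agent.train(200)                # same entry point as in ddpg_main.py
    agent.plot_result()

if __name__ == "__main__":
    main()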
ddpg_load_play.py
# DDPG load and play (tf2 subclassing API version)
# coded by St.Watermelon

import gym
from ddpg_learn import DDPGagent
import tensorflow as tf

def main():

    env = gym.make("Pendulum-v0")
    agent = DDPGagent(env)

    agent.load_weights('./save_weights/')

    time = 0
    state = env.reset()

    while True:
        env.render()
        action = agent.actor(tf.convert_to_tensor([state], dtype=tf.float32)).numpy()[0]
        print(action.shape)
        state, reward, done, _ = env.step(action)
        time += 1

        print('Time: ', time, 'Reward: ', reward)

        if done:
            break

    env.close()

if __name__=="__main__":
    main()
replaybuffer.py
## replay buffer class file
# import required packages
import numpy as np
from collections import deque
import random

class ReplayBuffer(object):
    """
    Replay Buffer
    """
    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self.buffer = deque()
        self.count = 0

    ## store a transition in the buffer
    def add_buffer(self, state, action, reward, next_state, done):
        transition = (state, action, reward, next_state, done)

        # check whether the buffer is full
        if self.count < self.buffer_size:
            self.buffer.append(transition)
            self.count += 1
        else:  # if full, drop the oldest transition and store the new one
            self.buffer.popleft()
            self.buffer.append(transition)

    ## sample a random mini-batch from the buffer
    def sample_batch(self, batch_size):
        if self.count < batch_size:
            batch = random.sample(self.buffer, self.count)
        else:
            batch = random.sample(self.buffer, batch_size)
        # unpack into states, actions, rewards, next states, and dones
        states = np.asarray([i[0] for i in batch])
        actions = np.asarray([i[1] for i in batch])
        rewards = np.asarray([i[2] for i in batch])
        next_states = np.asarray([i[3] for i in batch])
        dones = np.asarray([i[4] for i in batch])
        return states, actions, rewards, next_states, dones

    ## current number of transitions in the buffer
    def buffer_count(self):
        return self.count

    ## clear the buffer
    def clear_buffer(self):
        self.buffer = deque()
        self.count = 0
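As a quick check of the buffer interface above, here is a minimal, hypothetical usage sketch; the random-action rollout, the 100-step horizon, and the batch size of 32 are illustrative choices, not part of the original files.

# Minimal sketch: fill the buffer with random-action transitions and draw a batch.
import gym
from replaybuffer import ReplayBuffer

env = gym.make("Pendulum-v0")
buffer = ReplayBuffer(buffer_size=20000)

state = env.reset()
for _ in range(100):
    action = env.action_space.sample()
    next_state, reward, done, _ = env.step(action)
    buffer.add_buffer(state, action, reward, next_state, done)
    state = env.reset() if done else next_state

states, actions, rewards, next_states, dones = buffer.sample_batch(32)
print(states.shape, actions.shape)   # expected (32, 3) and (32, 1) for Pendulum-v0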