16.03.2021 Views

Advanced Deep Learning with Keras

Create successful ePaper yourself

Turn your PDF publications into a flip-book with our unique Google optimized e-Paper software.

Deep Reinforcement Learning

from keras.models import Model

from keras.optimizers import Adam

from collections import deque

import numpy as np

import random

import argparse

import gym

from gym import wrappers, logger

class DQNAgent():

def __init__(self, state_space, action_space, args, episodes=1000):
    """Set up the DQN agent: replay memory, exploration schedule, and
    the Q / target-Q networks.

    Args:
        state_space: environment observation space; only ``shape[0]`` is
            read here, to size the network input layer.
        action_space: environment action space; ``n`` sizes the network
            output layer, and the object is stored for later use.
        args: parsed command-line arguments; only ``args.ddqn`` is read
            here, toggling Double-DQN behavior.
        episodes (int): planned number of training episodes, used to
            derive the per-episode epsilon decay factor.
    """
    self.action_space = action_space
    # experience buffer
    self.memory = []
    # discount rate
    self.gamma = 0.9
    # initially 90% exploration, 10% exploitation
    self.epsilon = 0.9
    # iteratively applying decay til 10% exploration/90% exploitation
    self.epsilon_min = 0.1
    # choose a multiplicative decay such that epsilon reaches
    # epsilon_min after `episodes` episodes:
    # (epsilon_min / epsilon) ** (1 / episodes), applied once per episode
    self.epsilon_decay = self.epsilon_min / self.epsilon
    self.epsilon_decay = self.epsilon_decay ** (1. / float(episodes))
    # Q Network weights filename
    self.weights_file = 'dqn_cartpole.h5'
    # Q Network for training
    n_inputs = state_space.shape[0]
    n_outputs = action_space.n
    self.q_model = self.build_model(n_inputs, n_outputs)
    self.q_model.compile(loss='mse', optimizer=Adam())
    # target Q Network — updated only periodically, to stabilize the
    # bootstrapped targets during training
    self.target_q_model = self.build_model(n_inputs, n_outputs)
    # copy Q Network params to target Q Network
    self.update_weights()
    self.replay_counter = 0
    # enable Double DQN target computation when requested via CLI
    self.ddqn = bool(args.ddqn)

[ 298 ]

Hooray! Your file is uploaded and ready to be published.

Saved successfully!

Oh no, something went wrong!