decided to fuck everything up
.gitignore (vendored): 3 additions
@@ -6,3 +6,6 @@
 
 # Inputs
 *.txt
+
+# Latex
+build/
main.py: 49 changed lines
@@ -13,10 +13,9 @@ mpl.use('TkAgg')  # fixes my macOS bug
 import matplotlib.pyplot as plt
 
 
-P = 0.1
-ALPHA = 0.90
-EPSILON = 1e-12
-
+P = 0.1  # Slip probability
+ALPHA = 0.90  # Discount factor
+# EPSILON = 1e-12  # Convergence criterion
 A2 = np.array([  # Action index to action mapping
     [-1, 0],  # Up
     [ 1, 0],  # Down
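Note on the two constants this hunk documents: P is the chance that a move slips, so the intended action is executed with probability 1 - P. A minimal sketch of how a disturbance table such as PW_OF_X_U is often filled under a uniform-slip assumption (the helper name and the uniform split are illustrative, not code from this commit):

import numpy as np

def uniform_slip_table(n_actions, p_slip=0.1):
    """Row i: probability of each executed action given intended
    action i. Mass 1 - p_slip stays on the diagonal (the intended
    action); the remaining p_slip is spread uniformly over all."""
    pw = np.full((n_actions, n_actions), p_slip / n_actions)
    pw[np.diag_indices(n_actions)] += 1.0 - p_slip
    return pw  # every row sums to exactly 1.0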
@@ -31,7 +30,7 @@ S_TO_IJ = None  # Mapping of state vector to coordinates
 SN = None  # Number of states
 U_OF_X = None  # The allowed action space matrix representation
 PW_OF_X_U = None  # The probability distribution of disturbance
-G1_X = None  # The cost function vector representation (depends only on state)
+G1_X = None  # The cost function vector representation
 G2_X = None  # The second cost function vector representation
 F_X_U_W = None  # The System Equation
 
@@ -63,6 +62,7 @@ def init_global(maze_filename):
 
     S_TO_IJ = np.indices(MAZE.shape).transpose(1, 2, 0)[state_mask]
     SN = len(S_TO_IJ)
 
     ij_to_s = np.zeros(MAZE.shape, dtype=np.int32)
     ij_to_s[state_mask] = np.arange(SN)
+
@@ -99,6 +99,12 @@ def init_global(maze_filename):
 
 
 def plot_j_policy_on_maze(j, policy):
+    j_norm = (j - j.min()) / (j.max() - j.min()) + 1e-50
+    j_log = np.log10(j_norm)
+    print(j)
+    print(j_norm)
+    print(j_log)
+    print('-' * 50)
     heatmap = np.full(MAZE.shape, np.nan)
     heatmap[S_TO_IJ[:, 0], S_TO_IJ[:, 1]] = j
     cmap = mpl.cm.get_cmap('coolwarm')
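The debug lines added here min-max normalize j and shift it by 1e-50 so that np.log10 never receives an exact zero. A small sketch of the same guard, with the one case the diff's version still misses (a constant j) handled explicitly; the helper is illustrative only:

import numpy as np

def safe_log_normalize(j, floor=1e-50):
    """Min-max normalize to [0, 1], then take log10. The floor keeps
    log10(0) = -inf out of the result; the span check avoids the 0/0
    that a constant vector would otherwise produce."""
    span = j.max() - j.min()
    if span == 0.0:
        return np.zeros_like(j, dtype=np.float64)
    return np.log10((j - j.min()) / span + floor)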
@@ -147,13 +153,16 @@ def policy_iteration(j, g):
     return policy, j
 
 
-def _terminate(j, j_old, policy, policy_old):
-    # eps = EPSILON
-    # return np.abs(j - j_old).max() < eps
+def _terminate_pi(j, j_old, policy, policy_old):
     return np.all(policy == policy_old)
 
 
-def dynamic_programming(optimizer_step, g, return_history=False):
+def _terminate_vi(j, j_old, policy, policy_old):
+    eps = ALPHA**SN
+    return np.abs(j - j_old).max() < eps
+
+
+def dynamic_programming(optimizer_step, g, terminator, return_history=False):
     j = np.zeros(SN, dtype=np.float64)
     policy = None
     history = []
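The split into _terminate_pi and _terminate_vi matches the two algorithms: policy iteration stops when the greedy policy is stationary, while value iteration stops once the sup-norm change of j drops below ALPHA**SN, a threshold that tightens with a larger discount and more states. A self-contained toy check that a Bellman backup contracts at rate ALPHA, so this test is eventually met (the two-state chain and all its numbers are illustrative, not from this commit):

import numpy as np

ALPHA = 0.9
g = np.array([1.0, 0.0])        # toy stage costs
P_next = np.array([[0.5, 0.5],  # toy transition matrix
                   [0.0, 1.0]])

j = np.zeros(2)
for k in range(200):
    j_new = g + ALPHA * P_next @ j                 # Bellman backup
    if np.abs(j_new - j).max() < ALPHA ** len(j):  # eps = ALPHA**SN
        break
    j = j_new
print(k, j_new)  # the gap shrinks geometrically, rate <= ALPHA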
@@ -163,7 +172,7 @@ def dynamic_programming(optimizer_step, g, return_history=False):
         policy, j = optimizer_step(j, g)
         if return_history:
             history.append(j)
-        if _terminate(j, j_old, policy, policy_old):
+        if terminator(j, j_old, policy, policy_old):
             break
     if not return_history:
         return j, policy
@@ -185,19 +194,20 @@ if __name__ == '__main__':
     costs = {'g1': G1_X, 'g2': G2_X}
     optimizers = {'Value Iteration': value_iteration,
                   'Policy Iteration': policy_iteration}
+    terminators = {'Value Iteration': _terminate_vi,
+                   'Policy Iteration': _terminate_pi}
 
     for a in [0.9, 0.5, 0.01]:
-        plt.figure()
+        plt.figure(figsize=(9, 6))
+        plt.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
         plt.suptitle('DISCOUNT = ' + str(a))
         i = 1
         for opt in ['Value Iteration', 'Policy Iteration']:
             for cost in ['g1', 'g2']:
-                name = ' / '.join([opt, cost])
+                name = '{} / {}'.format(opt, cost)
                 ALPHA = a
-                j, policy = dynamic_programming(optimizers[opt], costs[cost])
-                print(name)
-                print(j)
+                j, policy = dynamic_programming(optimizers[opt], costs[cost],
+                                                terminators[opt])
+                # print(name, j)
                 plt.subplot(2, 2, i)
                 plt.gca().set_title(name)
                 plot_j_policy_on_maze(j, policy)
@@ -205,15 +215,16 @@ if __name__ == '__main__':
 
     # Error graphs
     for opt in ['Value Iteration', 'Policy Iteration']:
-        plt.figure()
-        plt.subplots_adjust(wspace=0.45, hspace=0.45)
+        plt.figure(figsize=(9, 6))
+        plt.subplots_adjust(wspace=0.4, hspace=0.4)
         plt.suptitle(opt)
         i = 1
         for cost in ['g1', 'g2']:
-            for a in [0.9, 0.8, 0.7]:
+            for a in [0.99, 0.7, 0.5]:
                 name = 'Cost: {}, discount: {}'.format(cost, a)
                 ALPHA = a
                 history = dynamic_programming(optimizers[opt], costs[cost],
+                                              terminators[opt],
                                               return_history=True)
                 plt.subplot(2, 3, i)
                 plt.gca().set_title(name)
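These error graphs consume the history list that dynamic_programming(..., return_history=True) returns. One way such a history is typically turned into a convergence curve, using the final iterate as the reference point (the helper and its axis choices are illustrative, not part of this commit):

import numpy as np
import matplotlib.pyplot as plt

def plot_sup_norm_error(history):
    """Sup-norm distance of every iterate from the last one, on a
    log scale so geometric convergence appears as a straight line."""
    j_star = history[-1]
    errors = [np.abs(j - j_star).max() for j in history[:-1]]
    plt.semilogy(errors)
    plt.xlabel('Iteration')
    plt.ylabel('Sup-norm error')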
report.latex (new file): 20 additions
@@ -0,0 +1,20 @@
+\documentclass{article}
+\usepackage[a4paper, margin=1in]{geometry}
+\usepackage{amsmath}
+\usepackage{fancyhdr}
+\pagestyle{fancy}
+\usepackage{lastpage}
+\cfoot{Page \thepage\ of \pageref{LastPage}}
+\rhead{Pavel Lutskov, 03654990}
+\lhead{Programming Assignment}
+\title{\huge Approximate Dynamic Programming and Reinforcement Learning \\
+       \Large Programming Assignment}
+% \subtitle{Assignment 1}
+\author{Pavel Lutskov, 03654990}
+\begin{document}
+\maketitle
+
+\section{Environment modeling}
+
+Damn, I modeled the environment.
+\end{document}
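The report body is still a one-line stub. For orientation, the discounted-cost model that the globals in main.py (F_X_U_W, PW_OF_X_U, G1_X, ALPHA) appear to encode can be written as below; the notation is a sketch of the standard formulation, not text from this commit:

% Assumed mapping: F_X_U_W ~ f, PW_OF_X_U ~ p(w | x, u),
% G1_X / G2_X ~ g, ALPHA ~ \alpha. Illustrative only.
\begin{align*}
  x_{k+1} &= f(x_k, u_k, w_k), \qquad w_k \sim p(\,\cdot \mid x_k, u_k), \\
  J_\pi(x_0) &= \operatorname{E}\left[\sum_{k=0}^{\infty} \alpha^{k}\, g(x_k)\right],
  \qquad 0 < \alpha < 1.
\end{align*}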