agent.py
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: [email protected]
Date: 2021-03-12 16:14:34
LastEditor: John
LastEditTime: 2022-12-03 18:12:50
Description: On-policy first-visit Monte Carlo control agent
Environment:
'''
import numpy as np
from collections import defaultdict
import torch
import dill
class Agent:
    ''' On-policy first-visit Monte Carlo control.

    Q(s, a) is estimated as the average of the returns observed after the
    first visit to (s, a) in each sampled episode; actions are selected with
    an epsilon-greedy policy over the current Q-table.
    '''
    def __init__(self, cfg):
        self.n_actions = cfg.n_actions
        self.epsilon = cfg.epsilon
        self.gamma = cfg.gamma
        self.Q_table = defaultdict(lambda: np.zeros(cfg.n_actions))
        self.returns_sum = defaultdict(float)  # cumulative sum of returns for each (state, action) pair
        self.returns_count = defaultdict(float)  # number of first visits for each (state, action) pair
    def sample_action(self, state):
        ''' Epsilon-greedy action selection used during training. '''
        state = str(state)
        if state in self.Q_table:
            # Spread epsilon uniformly over all actions, then favor the greedy action
            best_action = np.argmax(self.Q_table[state])
            action_probs = np.ones(self.n_actions, dtype=float) * self.epsilon / self.n_actions
            action_probs[best_action] += (1.0 - self.epsilon)
            action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
        else:
            # Unseen state: fall back to a uniformly random action
            action = np.random.randint(0, self.n_actions)
        return action
    def predict_action(self, state):
        ''' Greedy action selection used during evaluation. '''
        state = str(state)
        if state in self.Q_table:
            action = np.argmax(self.Q_table[state])
        else:
            # Unseen state: fall back to a uniformly random action
            action = np.random.randint(0, self.n_actions)
        return action
    def update(self, one_ep_transition):
        ''' First-visit Monte Carlo update from one episode of (state, action, reward) tuples. '''
        # Find all (state, action) pairs visited in this episode.
        # Each state is converted to a string so that it can be used as a dict key.
        sa_in_episode = set([(str(x[0]), x[1]) for x in one_ep_transition])
        for state, action in sa_in_episode:
            sa_pair = (state, action)
            # Find the first occurrence of the (state, action) pair in the episode
            first_occurrence_idx = next(i for i, x in enumerate(one_ep_transition)
                                        if str(x[0]) == state and x[1] == action)
            # Sum up all discounted rewards from the first occurrence onwards
            G = sum([x[2] * (self.gamma ** i) for i, x in enumerate(one_ep_transition[first_occurrence_idx:])])
            # Average the returns for this (state, action) pair over all sampled episodes
            self.returns_sum[sa_pair] += G
            self.returns_count[sa_pair] += 1.0
            self.Q_table[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
    def save_model(self, path=None):
        ''' Save the Q-table to a file under `path` (expected to end with a path separator). '''
        from pathlib import Path
        Path(path).mkdir(parents=True, exist_ok=True)
        # dill is required because the Q-table is a defaultdict built from a lambda,
        # which the standard pickle module cannot serialize
        torch.save(
            obj=self.Q_table,
            f=path + "Q_table",
            pickle_module=dill
        )
    def load_model(self, path=None):
        ''' Load the Q-table from a file under `path` (expected to end with a path separator). '''
        self.Q_table = torch.load(f=path + "Q_table", pickle_module=dill)
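
# --- Hypothetical usage sketch (not part of the original project) ---
# The toy environment, config class, and training loop below are assumptions
# added for illustration only; the real project drives this Agent with its
# own config object and environment wrappers.
if __name__ == "__main__":
    class ToyEnv:
        ''' Minimal 5-state corridor: action 1 moves right, action 0 moves left.
        Reaching state 4 gives reward 1 and ends the episode. '''
        def reset(self):
            self.state = 0
            return self.state
        def step(self, action):
            self.state = min(max(self.state + (1 if action == 1 else -1), 0), 4)
            done = self.state == 4
            reward = 1.0 if done else 0.0
            return self.state, reward, done

    class Config:
        n_actions = 2
        epsilon = 0.1
        gamma = 0.9

    env, agent = ToyEnv(), Agent(Config())
    for _ in range(200):  # sample episodes and apply first-visit MC updates
        state, one_ep_transition, done = env.reset(), [], False
        while not done:
            action = agent.sample_action(state)
            next_state, reward, done = env.step(action)
            one_ep_transition.append((state, action, reward))
            state = next_state
        agent.update(one_ep_transition)
    print("Greedy action in state 0:", agent.predict_action(0))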