-
Notifications
You must be signed in to change notification settings - Fork 3
/
sample_env.py
32 lines (26 loc) · 1.02 KB
/
sample_env.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import gym
class EnvSampler:
    """Draws single-step transitions from an environment, one call at a time.

    Tracks the in-progress episode across calls to :meth:`sample`: when an
    episode ends (environment terminal or truncation at ``max_path_length``)
    the accumulated episode reward is recorded in ``path_rewards`` and the
    next call starts a fresh episode via ``env.reset()``.
    """

    def __init__(self, env, max_path_length=1000):
        # Environment to step; expected to follow the classic gym API
        # where step() returns (obs, reward, done, info).
        self.env = env
        # Steps taken so far in the current episode.
        self.path_length = 0
        # Observation the next action is chosen from; None means a reset
        # is needed before the next step.
        self.current_state = None
        # Episodes are truncated after this many steps.
        self.max_path_length = max_path_length
        # Total reward of each completed episode, in order of completion.
        self.path_rewards = []
        # Running reward total for the episode in progress.
        self.sum_reward = 0

    def sample(self, agent, eval_t=False):
        """Take one environment step using ``agent`` and return the transition.

        Resets the environment first if no episode is in progress. Returns
        ``(state, action, next_state, reward, terminal, info)`` exactly as
        produced by the agent and environment; ``terminal`` reflects only
        the environment's done flag, not truncation.
        """
        if self.current_state is None:
            self.current_state = self.env.reset()
        state = self.current_state
        action = agent.select_action(state, eval_t)
        next_state, reward, terminal, info = self.env.step(action)
        self.path_length += 1
        self.sum_reward += reward
        # TODO: Save the path to the env_pool
        episode_over = terminal or self.path_length >= self.max_path_length
        if episode_over:
            # Close out the episode: record its return and arm a reset.
            self.path_rewards.append(self.sum_reward)
            self.current_state = None
            self.path_length = 0
            self.sum_reward = 0
        else:
            self.current_state = next_state
        return state, action, next_state, reward, terminal, info