import os
from collections import deque
from typing import List

import numpy as np
import torch

# make_dqn_lr_venv, make_buffer, and multi_step_reward are provided elsewhere
# in this repo and are assumed to be importable from its env/buffer modules.


def generate_seeds(num_seeds, base_seed=0):
    return [base_seed + i for i in range(num_seeds)]
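
# Example: generate_seeds(3, base_seed=10) -> [10, 11, 12]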


def load_seeds(seed_path):
    """Load integer seeds from a text file, one seed per line."""
    seed_path = os.path.expandvars(os.path.expanduser(seed_path))
    with open(seed_path) as f:
        return [int(s) for s in f.readlines()]
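
# Example seed file accepted by load_seeds (one integer per line):
#   42
#   1337
# seeds = load_seeds("~/my_seeds.txt")  # hypothetical path; ~ and env vars are expanded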


def eval_policy(
    args,
    policy,
    num_episodes,
    num_processes=1,
    deterministic=False,
    start_level=0,
    num_levels=0,
    seeds=None,
    level_sampler=None,
    progressbar=None,
    record=False,
    print_score=True,
    advanced_test=False,
    exploration_coeff=1.0,
):
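    # Evaluates `policy` on vectorized environments until `num_episodes`
    # episodes have completed. When args.record_td_error is set, evaluation
    # transitions are additionally stored in a replay buffer as multi-step
    # (n-step) transitions, presumably so TD errors can be computed afterwards.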
    if level_sampler:
        start_level = level_sampler.seed_range()[0]
        num_levels = 1

    eval_envs, level_sampler = make_dqn_lr_venv(
        num_envs=num_processes,
        env_name=args.env_name,
        seeds=seeds,
        device=args.device,
        num_levels=num_levels,
        start_level=start_level,
        no_ret_normalization=args.no_ret_normalization,
        distribution_mode=args.distribution_mode,
        paint_vel_info=args.paint_vel_info,
        level_sampler=level_sampler,
        record_runs=record,
        attach_task_id=args.attach_task_id,
    )
    ############################################################
    if args.record_td_error:
        # Per-process deques hold the last `multi_step` transitions so that
        # n-step returns can be assembled before insertion into the buffer.
        replay_buffer = make_buffer(args, eval_envs)
        level_seeds = torch.zeros(args.num_processes)
        state_deque: List[deque] = [deque(maxlen=args.multi_step) for _ in range(args.num_processes)]
        reward_deque: List[deque] = [deque(maxlen=args.multi_step) for _ in range(args.num_processes)]
        action_deque: List[deque] = [deque(maxlen=args.multi_step) for _ in range(args.num_processes)]
        expect_new_seed: List[bool] = [False for _ in range(args.num_processes)]
    ############################################################

    eval_episode_rewards: List[float] = []
    if level_sampler:
        state, _ = eval_envs.reset()
    else:
        state = eval_envs.reset()

    while len(eval_episode_rewards) < num_episodes:
        if advanced_test:
            action = policy.sample_action(state, c=exploration_coeff)
        elif not deterministic and np.random.uniform() < args.eval_eps:
            # Epsilon-greedy evaluation: with probability eval_eps, act uniformly at random.
            action = (
                torch.LongTensor([eval_envs.action_space.sample() for _ in range(num_processes)])
                .reshape(-1, 1)
                .to(args.device)
            )
            # if not deterministic:
            #     action, _ = policy.sample_action(state, temperature=0.05)
        else:
            with torch.no_grad():
                action, _ = policy.select_action(state, eval=True)

        next_state, reward, done, infos = eval_envs.step(action)

        ############################################################
        if args.record_td_error:
            for i, info in enumerate(infos):
                if "bad_transition" in info.keys():
                    print("Bad transition")
                state_deque[i].append(state[i])
                reward_deque[i].append(reward[i])
                action_deque[i].append(action[i])
                if len(state_deque[i]) == args.multi_step or done[i]:
                    temp_reward = reward_deque[i]
                    # if args.reward_clip > 0:
                    #     temp_reward = np.clip(temp_reward, -args.reward_clip, args.reward_clip)
                    n_reward = multi_step_reward(temp_reward, args.gamma)
                    n_state = state_deque[i][0]
                    n_action = action_deque[i][0]
                    replay_buffer.add(
                        n_state,
                        n_action,
                        next_state[i],
                        n_reward,
                        np.uint8(done[i]),
                        level_seeds[i],
                    )
                if done[i]:
                    # On episode end, flush the remaining shorter-than-n suffixes
                    # of the deques as truncated n-step transitions.
                    reward_deque_i = list(reward_deque[i])
                    for j in range(1, len(reward_deque_i)):
                        n_reward = multi_step_reward(reward_deque_i[j:], args.gamma)
                        n_state = state_deque[i][j]
                        n_action = action_deque[i][j]
                        replay_buffer.add(
                            n_state,
                            n_action,
                            next_state[i],
                            n_reward,
                            np.uint8(done[i]),
                            level_seeds[i],
                        )
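

# The loop above relies on `multi_step_reward`, which is defined elsewhere in
# the repo. A minimal sketch of what it is assumed to compute, namely the
# gamma-discounted n-step return over a window of rewards; the repo's own
# definition takes precedence if it is imported:
def multi_step_reward(rewards, gamma):
    # ret = r_0 + gamma * r_1 + ... + gamma^(n-1) * r_(n-1)
    ret = 0.0
    for idx, r in enumerate(rewards):
        ret += float(r) * gamma ** idx
    return ret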