if t % 500 == 0:
    wandb.log({"Current Epsilon": cur_epsilon}, step=t * args.num_processes)
# Periodically reset the linear/fully-connected layers of the Q-network,
# then re-sync the target network with the freshly reset weights
if args.reset_interval > 0 and t % args.reset_interval == 0:
    print(f"Resetting at step {t}")
    for name, module in agent.Q.named_children():
        for keyword in ["linear", "fc"]:
            if keyword in name:
                init_(module)
    agent.Q_target = copy.deepcopy(agent.Q)
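
# Log advantage statistics every 500 steps; skipped for the distributional
# variants (QR-DQN, C51), whose heads output return distributions rather
# than scalar Q-values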
if t % 500 == 0 and not (args.qrdqn or args.c51):
    advantages = agent.advantage(state, epsilon(t))
    mean_max_advantage = advantages.max(1)[0].mean()
    mean_min_advantage = advantages.min(1)[0].mean()
    wandb.log(
        {
            "Mean Max Advantage": mean_max_advantage,
            "Mean Min Advantage": mean_min_advantage,
        },
        step=t * args.num_processes,
    )

# Perform action and log results
next_state, reward, done, infos = envs.step(action)
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
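
# Per-environment bookkeeping: track level seeds, accumulate n-step
# transitions into the replay buffer, and log episode statistics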
for i, info in enumerate(infos):
    if "bad_transition" in info.keys():
        print("Bad transition")
    if level_sampler:
        if expect_new_seed[i]:
            level_seed = info["level_seed"]
            level_seeds[i][0] = level_seed
            if args.log_per_seed_stats:
                new_episode(value, estimates, level_seed, i, step=t * args.num_processes)
            expect_new_seed[i] = False
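
    # Buffer the latest transition components for n-step return computation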
    state_deque[i].append(state[i])
    reward_deque[i].append(reward[i])
    action_deque[i].append(action[i])
    reward_stats_deque[i].append(reward[i])
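
    # Once `multi_step` transitions have accumulated (or the episode ends),
    # fold the buffered rewards into an n-step return and store the oldest
    # transition in the replay buffer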
    if len(state_deque[i]) == args.multi_step or done[i]:
        temp_reward = reward_deque[i]
        # if args.reward_clip > 0:
        #     temp_reward = np.clip(temp_reward, -args.reward_clip, args.reward_clip)
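        # `multi_step_reward` (defined elsewhere in this file) is assumed to
        # return the discounted n-step return over the buffered rewards,
        # roughly sum(gamma ** k * r for k, r in enumerate(rewards))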
        n_reward = multi_step_reward(temp_reward, args.gamma)
        n_state = state_deque[i][0]
        n_action = action_deque[i][0]
        replay_buffer.add(
            n_state,
            n_action,
            next_state[i],
            n_reward,
            np.uint8(done[i]),
            level_seeds[i],
        )
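
    # On episode termination, flush the remaining (shorter) n-step windows so
    # the tail of the episode is not lost, then mark the env as awaiting a new seed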
    if done[i]:
        reward_deque_i = list(reward_deque[i])
        for j in range(1, len(reward_deque_i)):
            n_reward = multi_step_reward(reward_deque_i[j:], args.gamma)
            n_state = state_deque[i][j]
            n_action = action_deque[i][j]
            replay_buffer.add(
                n_state,
                n_action,
                next_state[i],
                n_reward,
                np.uint8(done[i]),
                level_seeds[i],
            )
        expect_new_seed[i] = True
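
        # The episodic monitor wrapper attaches final episode statistics to
        # `info` once the episode finishes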
if "episode" in info.keys(): |
episode_reward = info["episode"]["r"] |
ppo_normalised_reward = ppo_normalise_reward(episode_reward, args.env_name) |
min_max_normalised_reward = min_max_normalise_reward(episode_reward, args.env_name) |
wandb.log( |
{ |
"Train Episode Returns": episode_reward, |
"Train Episode Returns (normalised)": ppo_normalised_reward, |
"Train Episode Returns (ppo normalised)": ppo_normalised_reward, |
"Train Episode Returns (min-max normalised)": min_max_normalised_reward, |
}, |
step=t * args.num_processes, |
) |
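
        # Clear the n-step buffers ready for the next episode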
        state_deque[i].clear()
        reward_deque[i].clear()
        action_deque[i].clear()
        if args.log_per_seed_stats:
            plot_level_returns(
                level_seeds,
                returns,
                estimates,
                gaps,
                episode_reward,
                i,
                step=t * args.num_processes,
            )
        if args.autodrq: