run_bc.py [126:170]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if eval_norm_res >= best_score:
            if args.save_best_model:
                agent.save_model(output_dir)
            best_score = eval_norm_res
            best_res = {'epoch': curr_epoch, 'best normalized score avg': eval_norm_res,
                        'best normalized score std': eval_norm_res_std,
                        'best raw score avg': eval_res, 'best raw score std': eval_res_std}
            with open(os.path.join(output_dir, "best_score.txt"), 'w') as f:
                json.dump(best_res, f)
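# A minimal sketch (hypothetical, not part of this file) of loading the record
# back for analysis, assuming the JSON written above:
#
#   with open(os.path.join(output_dir, "best_score.txt")) as f:
#       best_res = json.load(f)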


# Runs the policy for `eval_episodes` episodes and returns average raw and
# normalized rewards. A fixed seed offset keeps the eval environment's
# randomness separate from training.
def eval_policy(policy, env_name, seed, eval_episodes=10):
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)

    scores = []
    for _ in range(eval_episodes):
        traj_return = 0.
        state, done = eval_env.reset(), False
        while not done:
            action = policy.sample_action(np.array(state))
            state, reward, done, _ = eval_env.step(action)
            traj_return += reward
        scores.append(traj_return)

    avg_reward = np.mean(scores)
    std_reward = np.std(scores)

    # D4RL's get_normalized_score is affine, so normalizing the mean reward
    # equals the mean of the per-episode normalized scores.
    normalized_scores = [eval_env.get_normalized_score(s) for s in scores]
    avg_norm_score = eval_env.get_normalized_score(avg_reward)
    std_norm_score = np.std(normalized_scores)

    utils.print_banner(f"Evaluation over {eval_episodes} episodes: {avg_reward:.2f} {avg_norm_score:.2f}")
    return avg_reward, std_reward, avg_norm_score, std_norm_score
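# A hedged usage sketch of the call as it would appear in the training loop
# above (`agent` and `args` are this script's objects):
#
#   eval_res, eval_res_std, eval_norm_res, eval_norm_res_std = eval_policy(
#       agent, args.env_name, args.seed, eval_episodes=10)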


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ### Experimental Setups ###
    parser.add_argument("--exp", default='exp_1', type=str)             # Experiment ID
    parser.add_argument('--device', default=0, type=int)                  # device, {"cpu", "cuda", "cuda:0", "cuda:1"}, etc
    parser.add_argument("--env_name", default="walker2d-expert-v2", type=str)  # OpenAI gym environment name
    parser.add_argument("--dir", default="tests", type=str)  # Logging directory
    parser.add_argument("--seed", default=0, type=int)              # Sets Gym, PyTorch and Numpy seeds
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



run_offline.py [124:168]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if eval_norm_res >= best_score:
            if args.save_best_model:
                agent.save_model(output_dir)
            best_score = eval_norm_res
            best_res = {'epoch': curr_epoch, 'best normalized score avg': eval_norm_res,
                        'best normalized score std': eval_norm_res_std,
                        'best raw score avg': eval_res, 'best raw score std': eval_res_std}
            with open(os.path.join(output_dir, "best_score.txt"), 'w') as f:
                json.dump(best_res, f)


# Runs the policy for `eval_episodes` episodes and returns average raw and
# normalized rewards. A fixed seed offset keeps the eval environment's
# randomness separate from training.
def eval_policy(policy, env_name, seed, eval_episodes=10):
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)

    scores = []
    for _ in range(eval_episodes):
        traj_return = 0.
        state, done = eval_env.reset(), False
        while not done:
            action = policy.sample_action(np.array(state))
            state, reward, done, _ = eval_env.step(action)
            traj_return += reward
        scores.append(traj_return)

    avg_reward = np.mean(scores)
    std_reward = np.std(scores)

    # D4RL's get_normalized_score is affine, so normalizing the mean reward
    # equals the mean of the per-episode normalized scores.
    normalized_scores = [eval_env.get_normalized_score(s) for s in scores]
    avg_norm_score = eval_env.get_normalized_score(avg_reward)
    std_norm_score = np.std(normalized_scores)

    utils.print_banner(f"Evaluation over {eval_episodes} episodes: {avg_reward:.2f} {avg_norm_score:.2f}")
    return avg_reward, std_reward, avg_norm_score, std_norm_score
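# Note: the rollout above uses the pre-0.26 Gym API (reset() returns only the
# observation; step() returns a 4-tuple), which is what D4RL expects. A hedged
# sketch of the same rollout under gym>=0.26, were the environment upgraded:
#
#   state, _ = eval_env.reset(seed=seed + 100)
#   done = False
#   while not done:
#       action = policy.sample_action(np.array(state))
#       state, reward, terminated, truncated, _ = eval_env.step(action)
#       done = terminated or truncated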


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ### Experimental Setups ###
    parser.add_argument("--exp", default='exp_1', type=str)             # Experiment ID
    parser.add_argument('--device', default=0, type=int)                  # device, {"cpu", "cuda", "cuda:0", "cuda:1"}, etc
    parser.add_argument("--env_name", default="walker2d-expert-v2", type=str)  # OpenAI gym environment name
    parser.add_argument("--dir", default="tests", type=str)  # Logging directory
    parser.add_argument("--seed", default=0, type=int)              # Sets Gym, PyTorch and Numpy seeds
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



