agents/adw_bc_diffusion.py [72:83]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                 ):

        if model_type == 'MLP':
            self.model = MLP(state_dim=state_dim, action_dim=action_dim, device=device)
        elif model_type == 'MLP_Unet':
            self.model = MLP_Unet(state_dim=state_dim, action_dim=action_dim, device=device)
        elif model_type == 'Tanh_MLP':
            self.model = Tanh_MLP(state_dim=state_dim, action_dim=action_dim, max_action=max_action, device=device)

        self.actor = Diffusion(state_dim=state_dim, action_dim=action_dim, model=self.model, max_action=max_action,
                               beta_schedule=beta_schedule, n_timesteps=n_timesteps,
                               ).to(device)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



agents/qgdp.py [80:91]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                 ):

        if model_type == 'MLP':
            self.model = MLP(state_dim=state_dim, action_dim=action_dim, device=device)
        elif model_type == 'MLP_Unet':
            self.model = MLP_Unet(state_dim=state_dim, action_dim=action_dim, device=device)
        elif model_type == 'Tanh_MLP':
            self.model = Tanh_MLP(state_dim=state_dim, action_dim=action_dim, max_action=max_action, device=device)

        self.actor = Diffusion(state_dim=state_dim, action_dim=action_dim, model=self.model, max_action=max_action,
                               beta_schedule=beta_schedule, n_timesteps=n_timesteps,
                               ).to(device)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
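The two constructor excerpts above are identical: each agent dispatches on model_type to pick a noise-prediction network and then wraps it in a Diffusion actor. A minimal sketch of a shared factory that both __init__ methods could call instead is shown below; the helper name build_diffusion_actor and the agents.model / agents.diffusion import paths are assumptions for illustration, not code from the repository.

# Assumed import paths; adjust to wherever MLP, MLP_Unet, Tanh_MLP and
# Diffusion are actually defined in this repository.
from agents.model import MLP, MLP_Unet, Tanh_MLP
from agents.diffusion import Diffusion


def build_diffusion_actor(model_type, state_dim, action_dim, max_action,
                          beta_schedule, n_timesteps, device):
    """Build the noise-prediction model and Diffusion actor shared by both agents."""
    if model_type == 'MLP':
        model = MLP(state_dim=state_dim, action_dim=action_dim, device=device)
    elif model_type == 'MLP_Unet':
        model = MLP_Unet(state_dim=state_dim, action_dim=action_dim, device=device)
    elif model_type == 'Tanh_MLP':
        model = Tanh_MLP(state_dim=state_dim, action_dim=action_dim,
                         max_action=max_action, device=device)
    else:
        # Fail early with a clear message instead of leaving `model` unset
        # and hitting an AttributeError when Diffusion(...) is constructed.
        raise ValueError(f"Unknown model_type: {model_type!r}")

    actor = Diffusion(state_dim=state_dim, action_dim=action_dim, model=model,
                      max_action=max_action, beta_schedule=beta_schedule,
                      n_timesteps=n_timesteps).to(device)
    return model, actor

With such a helper, each constructor body would reduce to a single line of the form self.model, self.actor = build_diffusion_actor(...), removing the duplicated block from agents/adw_bc_diffusion.py and agents/qgdp.py.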



