agents/ql_cvae.py [177:185]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            q1_new_action, q2_new_action = self.critic(state, new_action)
            # Randomly pick one critic head to maximize; normalize by the
            # other head's detached mean |Q| so the loss scale does not
            # depend on the magnitude of the learned Q-values.
            if np.random.uniform() > 0.5:
                lmbda = self.eta / q2_new_action.abs().mean().detach()
                q_loss = - lmbda * q1_new_action.mean()
            else:
                lmbda = self.eta / q1_new_action.abs().mean().detach()
                q_loss = - lmbda * q2_new_action.mean()

            self.actor_optimizer.zero_grad()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
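
Both excerpts implement the same normalized double-Q actor loss: one critic head is picked uniformly at random to be maximized, and the term is scaled by the detached mean magnitude of the other head,

    lmbda  = eta / mean(|Q_j(s, a)|)     # gradient stopped through lmbda
    q_loss = -lmbda * mean(Q_i(s, a))    # (i, j) is (1, 2) or (2, 1), each with probability 0.5

This keeps the Q-maximization term at a roughly constant scale relative to the rest of the actor loss regardless of how large the learned Q-values grow, and randomizing which head is maximized avoids consistently pushing up a single, possibly overestimating, critic.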



agents/ql_diffusion.py [142:153]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            q1_new_action, q2_new_action = self.critic(state, new_action)
            # Same randomized-head Q normalization as in ql_cvae.py above.
            if np.random.uniform() > 0.5:
                lmbda = self.eta / q2_new_action.abs().mean().detach()
                q_loss = - lmbda * q1_new_action.mean()
            else:
                lmbda = self.eta / q1_new_action.abs().mean().detach()
                q_loss = - lmbda * q2_new_action.mean()
            # Alternative (disabled): maximize the minimum over both heads.
            # q_new_action = self.critic.q_min(state, new_action)
            # lmbda = self.eta / q_new_action.abs().mean().detach()
            # q_loss = - lmbda * q_new_action.mean()

            self.actor_optimizer.zero_grad()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
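
A standalone sketch (not from the repo) showing why the normalization is used: the loss value is invariant to a common rescaling of the critic outputs, so eta controls the strength of the Q term independently of Q's scale. normalized_q_loss is a hypothetical helper that mirrors the branch bodies above.

    import torch

    def normalized_q_loss(q_max, q_norm, eta=1.0):
        # Maximize one head's mean Q, normalized by the other head's
        # detached mean |Q| (exactly the branch bodies in both snippets).
        lmbda = eta / q_norm.abs().mean().detach()
        return -lmbda * q_max.mean()

    q1, q2 = torch.randn(256), torch.randn(256)
    loss_a = normalized_q_loss(q1, q2)
    loss_b = normalized_q_loss(100 * q1, 100 * q2)
    print(torch.allclose(loss_a, loss_b))  # True: a common scale factor cancels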



