# Excerpt: _train_one_round() from
# sourcecode/scoring/reputation_matrix_factorization/reputation_matrix_factorization.py

def _train_one_round(model, loss_fn, dataset, hParams):
  # Identify tensors for training and testing
  notes = dataset.noteTensor
  raters = dataset.raterTensor
  numRatings = dataset.raterTensor.shape[0]
  # Initilaize training state
  optim = torch.optim.Adam(model.parameters(), lr=hParams.learningRate)
  epoch = 0
  start = time.time()
  priorLoss = None
  while epoch <= hParams.numEpochs:
    # Set gradients to zero
    optim.zero_grad()
    # Perform forward pass
    pred = model(notes, raters)
    # Compute loss
    loss = loss_fn(pred.flatten())
    loss += model.get_regularization_loss(numRatings)
    assert not torch.isnan(loss).any()
    if hParams.logRate and epoch % hParams.logRate == 0:
      logger.info(f"epoch={epoch:03d} | loss={loss.item():7.6f} | time={time.time() - start:.1f}s")
    if hParams.convergence > 0 and epoch % hParams.stablePeriod == 0:
      if priorLoss is not None and (priorLoss - loss).abs() < hParams.convergence:
        if hParams.logRate:
          logger.info(
            f"epoch={epoch:03d} | loss={loss.item():7.6f} | time={time.time() - start:.1f}s"
          )
        break
      priorLoss = loss
    # Perform backward pass
    loss.backward()
    # Update parameters
    optim.step()
    # Increment epoch
    epoch += 1
  return loss.item()