src/epam/auto_llm_eval/evaluator.py
from typing import Any  # module-level import required by the annotations below


def to_data_frame(self, metric_name: str) -> dict[str, Any]:
    """
    Convert the evaluation result into a dataframe row.

    Flattens the evaluation result, the per-grade probabilities, and any
    metadata into a single dictionary suitable for use as one row when
    constructing a dataframe.

    Args:
        metric_name (str): The name of the metric being evaluated.

    Returns:
        dict[str, Any]: A dictionary containing evaluation results and
        metadata.
    """
    # Core results for this metric.
    result_dict: dict[str, Any] = {
        "metric": metric_name,
        "score": self.get_score(),
        "weighted_score": self.get_weighted_score(),
        "is_confident": self.is_confident(),
    }
    # One column per grade (e.g. "score_A"), built from the
    # (grade, probability) pairs in self.probabilities.
    prob_dict: dict[str, float] = {
        f"score_{grade}": prob for grade, prob in self.probabilities
    }
    # Merge into one flat row; later mappings win on key collisions,
    # so metadata keys override result and probability keys.
    merged_dict: dict[str, Any] = {
        **result_dict,
        **prob_dict,
        **self.metadata,
    }
    return merged_dict
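

# Usage sketch (illustrative, not part of evaluator.py): the row dictionaries
# returned by to_data_frame can be collected into a pandas DataFrame, one row
# per metric. The `evaluations` sequence of (metric_name, result) pairs below
# is a hypothetical placeholder for output produced elsewhere in the pipeline.
import pandas as pd

rows = [result.to_data_frame(name) for name, result in evaluations]
df = pd.DataFrame(rows)  # columns absent from a given row are filled with NaN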