in databricks/lib/spark_helper/predictions.py [0:0]
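For context, the method below depends on module-level imports and two dataclasses that sit outside this excerpt; the sketch of that context below is an assumption, not code from the file:

# Assumed module-level context (not shown in this excerpt):
import json
from datetime import datetime
from pathlib import Path
from typing import List

# `Prediction` and `ModelParams` are assumed to be dataclasses defined
# elsewhere in this package; the import path below is hypothetical.
from .models import ModelParams, Prediction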
def load_predictions(self, job_id: int) -> List[Prediction]:
    """Load every prediction JSON written by `job_id` from the volume and
    deserialize each file into a Prediction."""
    prediction_file_paths = self.storage_service.list_files(
        Path(f"{self.VOLUME_NAME}/{job_id}")
    )
    predictions = []
    for file_path in prediction_file_paths:
        # Each file holds one prediction record as a JSON document.
        record = json.loads(self.storage_service.read_text(file_path))
        model_params = record["model_params"]
        predictions.append(
            Prediction(
                job_id=record["job_id"],
                file_id=record["file_id"],
                ground_truth_revision_id=record["ground_truth_revision_id"],
                model_params=ModelParams(
                    model_name=model_params["model_name"],
                    model_version=model_params["model_version"],
                    temperature=model_params["temperature"],
                    prompt=model_params["prompt"],
                    # json_schema is persisted as a JSON string; decode it
                    # when present, otherwise leave it as None.
                    json_schema=(
                        json.loads(model_params["json_schema"])
                        if model_params["json_schema"]
                        else None
                    ),
                ),
                prediction_result=record["prediction_result"],
                # created_date is stored as an ISO-8601 string.
                created_date=datetime.fromisoformat(record["created_date"]),
            )
        )
    return predictions
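A minimal usage sketch follows; the class name and constructor wiring are assumptions, since only the method body appears in this excerpt:

# Usage sketch: `PredictionsHelper` and `storage_service` are hypothetical
# names, shown only to illustrate how the loader is called.
helper = PredictionsHelper(storage_service=storage_service)
for pred in helper.load_predictions(job_id=1234):
    print(pred.file_id, pred.model_params.model_name, pred.created_date)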