def on_start()

in gpu-workload/t5/loadgenerator/locustfile.py [0:0]


    def on_start(self):
        # Read the target model's name and version from the environment,
        # falling back to the load generator's defaults.
        model_name = os.getenv('MODEL_NAME', 't5-small')
        model_version = os.getenv('MODEL_VERSION', '1.0')

        # Build the prediction endpoint URL from the Locust host and prepare a
        # fixed English-to-Spanish translation payload reused on every request.
        self.infer_url = f'{self.environment.host}/predictions/{model_name}/{model_version}'
        self.payload = {
            "text": "this is a test sentence",
            "from": "en",
            "to": "es"
        }