def preprocess()

in gpu-workload/t5/model/handler.py [0:0]


    def preprocess(self, requests):
        # Build one "translate <src> to <tgt>: <text>" prompt per request in the batch.
        texts_batch = []
        for data in requests:
            data = data["body"]
            input_text = data["text"]
            src_lang = data["from"]
            tgt_lang = data["to"]
            # TorchServe may deliver the payload fields as raw bytes; decode them to str.
            if isinstance(input_text, (bytes, bytearray)):
                input_text = input_text.decode("utf-8")
                src_lang = src_lang.decode("utf-8")
                tgt_lang = tgt_lang.decode("utf-8")
            texts_batch.append(
                f"translate {self._LANG_MAP[src_lang]} to {self._LANG_MAP[tgt_lang]}: {input_text}"
            )
        # Pad so prompts of different lengths can be stacked into a single tensor.
        inputs = self.tokenizer(texts_batch, padding=True, return_tensors="pt")
        # Move the token ids to the same device as the model (CPU or GPU).
        input_batch = inputs["input_ids"].to(self.device)
        return input_batch
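
For context, a minimal sketch of the same preprocessing path exercised outside TorchServe. The language map contents, the sample request payloads, and the t5-small checkpoint are assumptions for illustration; the handler's actual _LANG_MAP and model may differ.

    import torch
    from transformers import AutoTokenizer

    # Assumed language map; the real handler's _LANG_MAP may differ.
    LANG_MAP = {"en": "English", "de": "German", "fr": "French"}

    # Two sample requests, mimicking the batch shape preprocess() receives.
    requests = [
        {"body": {"text": "The house is wonderful.", "from": "en", "to": "de"}},
        {"body": {"text": "I like pizza.", "from": "en", "to": "fr"}},
    ]

    tokenizer = AutoTokenizer.from_pretrained("t5-small")  # assumed checkpoint
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Same prompt construction as the handler, without the class plumbing.
    texts_batch = [
        f"translate {LANG_MAP[r['body']['from']]} to {LANG_MAP[r['body']['to']]}: {r['body']['text']}"
        for r in requests
    ]
    inputs = tokenizer(texts_batch, padding=True, return_tensors="pt")
    input_batch = inputs["input_ids"].to(device)
    print(input_batch.shape)  # e.g. torch.Size([2, <padded_length>])

The padding=True argument is what allows prompts of different lengths to be stacked into a single input_ids tensor, which is why the handler's tokenizer call above also needs it when TorchServe batches more than one request.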