in Utils/llm/api.py [0:0]
import requests

# `API`, `Model`, and `APIException` are defined elsewhere in this repo;
# the import paths below are assumptions standing in for the real ones.
from Utils.llm.config import API, Model
from Utils.llm.exceptions import APIException


def request_gemini_pro_data(system_prompt, messages, temperature=0.0):
    """Send a chat request to the Gemini Pro endpoint and return the reply text plus token usage."""
    config = API[Model.GeminiPro]()
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {config['api_key']}",
    }
    contents = [
        {"role": message["role"], "parts": [{"text": message["content"]}]}
        for message in messages
    ]
    payload = {
        "contents": contents,
        "system_instruction": {"parts": [{"text": system_prompt}]},
        "generation_config": {
            "maxOutputTokens": 8192,
            # `temperature` was an undefined name in the original; it is now
            # a function parameter (the 0.0 default is an assumption).
            "temperature": temperature,
        },
"safetySettings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_ONLY_HIGH",
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "BLOCK_ONLY_HIGH",
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "BLOCK_ONLY_HIGH",
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "BLOCK_ONLY_HIGH",
}
],
}
    response = requests.post(config["url"], headers=headers, json=payload, timeout=300)
    if not response.ok:
        raise APIException(response.status_code, response.content)
    data = response.json()
    # Return the first candidate's text along with prompt/response token counts.
    return {
        "content": data["candidates"][0]["content"]["parts"][0]["text"],
        "tokens": {
            "input_tokens": data["usageMetadata"]["promptTokenCount"],
            "output_tokens": data["usageMetadata"]["candidatesTokenCount"],
        },
    }
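

# --- Example usage: a minimal sketch, not part of the original file. ---
# Assumes `API[Model.GeminiPro]()` returns a config dict with working "url"
# and "api_key" entries; the prompt text and temperature are illustrative.
if __name__ == "__main__":
    result = request_gemini_pro_data(
        system_prompt="You are a concise technical assistant.",
        messages=[{"role": "user", "content": "Explain what a system instruction does."}],
        temperature=0.2,
    )
    print(result["content"])
    print(result["tokens"])  # {"input_tokens": ..., "output_tokens": ...}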