# ask_model()
#
# in Utils/llm/api.py [0:0]


def ask_model(messages, system_prompt, model, attempt=1):
    start_time = time.time()
    print(f'\tAttempt {attempt} at {datetime.now()}')
    try:
        data = None

        match model:
            case Model.GeminiPro:
                data = request_gemini_pro_data(system_prompt, messages)
            case Model.GeminiPro_0801 | Model.Gemini_15_Pro_002 | Model.GeminiPro_1114 | Model.GeminiPro_1121:
                data = request_google_ai_studio_data(system_prompt, messages, model)
            case Model.Opus_3 | Model.Sonnet_35 | Model.Sonnet_35v2 | Model.Haiku_35:
                data = request_claude_data(system_prompt, messages, model)
            case Model.AmazonNovaPro:
                data = request_bedrock_data(system_prompt, messages, model)
            case _:
                data = request_openai_format_data(system_prompt, messages, model)

        execute_time = time.time() - start_time
        return {
            "content": data["content"],
            "tokens": data["tokens"],
            "execute_time": execute_time
        }
    except APIException as e:
        print(f"Error: {e.status_code}")
        print(f"Error: {e.content}")
        if e.status_code == 429:
            print('Will try in 1 minute...')
            time.sleep(60)
            return ask_model(messages, system_prompt, model, attempt + 1)
        else:
            if attempt > 2:
                return {
                    "error": f'### Error: {e.content}\n'
                }
            else:
                print("\tTrying again...")
                time.sleep(10)
                return ask_model(messages, system_prompt, model, attempt + 1)
    except requests.exceptions.Timeout:
        if attempt > 2:
            return {
                "error": f'### Error: Timeout error\n'
            }
        print("\tRequest timed out. Trying again...")
        return ask_model(messages, system_prompt, model, attempt + 1)