def llm_call()

in functions/function_app.py


import os
from typing import Callable

from langchain.chains import LLMChain
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI


def llm_call(model: str, system_template: str, human_template: str, param_provider: Callable[[], dict], custom_output_parser) -> str:
    try:
        api_key = os.environ['OPENAI_API_KEY']
        # Temperature ranges from 0 to 2; lower values are more deterministic, higher values more random.
        temperature = 0.7
        # Cap on the response length; the API default is larger and model-dependent.
        max_tokens = 2048
        chat_model = ChatOpenAI(model=model, api_key=api_key, temperature=temperature, max_tokens=max_tokens)
        system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)

        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

        chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

        # Create the LLM chain
        chain = LLMChain(
            llm=chat_model,
            prompt=chat_prompt,
            output_parser=custom_output_parser
        )

        params = param_provider()
        # Run the chain and return the result
        return chain.run(**params)
    except Exception as e:
        # Log the error, then re-raise (bare raise preserves the original traceback).
        print(f"An error occurred while calling the LLM: {e}")
        raise
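
A minimal usage sketch, assuming the legacy LLMChain API shown above. The model name, templates, get_params, and StripWhitespaceParser below are illustrative placeholders, not part of the original code:

from langchain.schema import BaseOutputParser


class StripWhitespaceParser(BaseOutputParser):
    """Hypothetical parser: returns the model's reply with surrounding whitespace removed."""

    def parse(self, text: str) -> str:
        return text.strip()


def get_params() -> dict:
    # Supplies a value for the {topic} placeholder used in the human template below.
    return {"topic": "serverless computing"}


result = llm_call(
    model="gpt-4o-mini",  # assumed model name; use whichever model your account supports
    system_template="You are a concise assistant.",
    human_template="List three benefits of {topic}.",
    param_provider=get_params,
    custom_output_parser=StripWhitespaceParser(),
)

Because LLMChain applies the parser to the raw completion, llm_call returns whatever the parser's parse method produces; a parser returning str keeps the declared -> str return type accurate.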