Bases: AIProvider
OpenAI provider implementation.
Initialize the OpenAI provider with API key and model.
Source code in src/mlflow_assistant/providers/openai_provider.py
def __init__(
    self,
    api_key=None,
    model=OpenAIModel.GPT35.value,
    temperature: float | None = None,
    **kwargs,
):
    """Initialize the OpenAI provider with API key and model.

    Args:
        api_key: OpenAI API key. If falsy, a warning is logged and
            requests are expected to fail.
        model: Model identifier; falls back to ``OpenAIModel.GPT35``
            when falsy.
        temperature: Sampling temperature. When ``None``, the provider
            default from ``Provider.get_default_temperature`` is used.
        **kwargs: Optional extra parameters; only keys recognized by
            ``ParameterKeys`` for the OpenAI provider (and non-None)
            are forwarded to ``ChatOpenAI``.
    """
    self.api_key = api_key
    self.model_name = model or OpenAIModel.GPT35.value
    # Fix: use an explicit None check so a caller-supplied 0.0 is honored;
    # the previous `temperature or default` discarded falsy temperatures.
    self.temperature = (
        temperature
        if temperature is not None
        else Provider.get_default_temperature(Provider.OPENAI.value)
    )
    self.kwargs = kwargs

    if not self.api_key:
        logger.warning("No OpenAI API key provided. Responses may fail.")

    # Build parameters dict with only non-None values.
    # NOTE(review): `temperature` here may still be None (ChatOpenAI is
    # then expected to apply its own default) — confirm that is intended
    # rather than passing self.temperature.
    model_params = {
        "api_key": api_key,
        "model": self.model_name,
        "temperature": temperature,
    }

    # Fix: iterate the OPENAI parameter list — the original iterated
    # Provider.OLLAMA's parameters (copy-paste bug), silently dropping
    # every OpenAI-specific optional kwarg.
    for param in ParameterKeys.get_parameters(Provider.OPENAI.value):
        if param in kwargs and kwargs[param] is not None:
            model_params[param] = kwargs[param]

    # Initialize with parameters matching the documentation
    self.model = ChatOpenAI(**model_params)
    logger.debug(f"OpenAI provider initialized with model {self.model_name}")
langchain_model()
Get the underlying LangChain model.
Source code in src/mlflow_assistant/providers/openai_provider.py
def langchain_model(self):
    """Return the LangChain chat model backing this provider."""
    backing_model = self.model
    return backing_model