Skip to content

cli

mlflow_assistant.cli

CLI modules for MLflow Assistant.

commands

CLI commands for MLflow Assistant.

This module contains the main CLI commands for interacting with MLflow using natural language queries through various AI providers.

cli(verbose)

MLflow Assistant: Interact with MLflow using LLMs.

This CLI tool helps you to interact with MLflow using natural language.

Source code in src/mlflow_assistant/cli/commands.py
@click.group()
@click.option("--verbose", "-v", is_flag=True, help="Enable verbose logging")
def cli(verbose):
    """MLflow Assistant: Interact with MLflow using LLMs.

    This CLI tool helps you to interact with MLflow using natural language.
    """
    # Root logger setup: the -v flag promotes the level from INFO to DEBUG.
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format=LOG_FORMAT,
    )

mock_process_query(query, provider_config, verbose=False)

Mock function that simulates the query processing workflow.

This will be replaced with the actual implementation later.

Parameters:

Name Type Description Default
query str

The user's query

required
provider_config dict[str, Any]

The AI provider configuration

required
verbose bool

Whether to show verbose output

False

Returns:

Type Description
dict[str, Any]

Dictionary with mock response information

Source code in src/mlflow_assistant/cli/commands.py
def mock_process_query(
    query: str, provider_config: dict[str, Any], verbose: bool = False,
) -> dict[str, Any]:
    """Mock function that simulates the query processing workflow.

    This will be replaced with the actual implementation later.

    Args:
        query: The user's query
        provider_config: The AI provider configuration
        verbose: Whether to show verbose output

    Returns:
        Dictionary with mock response information

    """
    # Resolve provider details, falling back to the "not configured" marker.
    provider_type = provider_config.get(
        CONFIG_KEY_TYPE, DEFAULT_STATUS_NOT_CONFIGURED,
    )
    model = provider_config.get(
        CONFIG_KEY_MODEL, DEFAULT_STATUS_NOT_CONFIGURED,
    )

    # Assemble the canned response, optionally appending debug details.
    parts = [
        f"This is a mock response to: '{query}'\n",
        "The MLflow integration will be implemented soon!",
    ]
    if verbose:
        parts.append(f"\nDebug: Using {provider_type} with {model}")

    return {
        "original_query": query,
        "provider_config": {
            CONFIG_KEY_TYPE: provider_type,
            CONFIG_KEY_MODEL: model,
        },
        "enhanced": False,
        "response": "\n".join(parts),
    }

setup()

Run the interactive setup wizard.

This wizard helps you configure MLflow Assistant.

Source code in src/mlflow_assistant/cli/commands.py
@cli.command()
def setup():
    """Run the interactive setup wizard.

    This wizard helps you configure MLflow Assistant.
    """
    # Thin wrapper: all interactive configuration logic lives in setup_wizard().
    setup_wizard()

start(verbose)

Start an interactive chat session with MLflow Assistant.

This opens an interactive chat session where you can ask questions about your MLflow experiments, models, and data. Type /bye to exit the session.

Examples of questions you can ask: - What are my best performing models for classification? - Show me details of experiment 'customer_churn' - Compare runs abc123 and def456 - Which hyperparameters should I try next for my regression model?

Commands: - /bye: Exit the chat session - /help: Show help about available commands - /clear: Clear the screen

Source code in src/mlflow_assistant/cli/commands.py
@cli.command()
@click.option("--verbose", "-v", is_flag=True, help="Show verbose output")
def start(verbose):
    """Start an interactive chat session with MLflow Assistant.

    This opens an interactive chat session where you can ask questions about
    your MLflow experiments, models, and data. Type /bye to exit the session.

    Examples of questions you can ask:
    - What are my best performing models for classification?
    - Show me details of experiment 'customer_churn'
    - Compare runs abc123 and def456
    - Which hyperparameters should I try next for my regression model?

    Commands:
    - /bye: Exit the chat session
    - /help: Show help about available commands
    - /clear: Clear the screen
    """
    # Abort immediately when configuration is missing or incomplete.
    ok, err = validate_setup()
    if not ok:
        click.echo(f"❌ Error: {err}")
        return

    provider_config = get_provider_config()

    # Provider details for the session banner.
    provider_type = provider_config.get(
        CONFIG_KEY_TYPE, DEFAULT_STATUS_NOT_CONFIGURED,
        )
    model = provider_config.get(
        CONFIG_KEY_MODEL, DEFAULT_STATUS_NOT_CONFIGURED,
        )

    click.echo("\n🤖 MLflow Assistant Chat Session")
    click.echo(f"Connected to MLflow at: {get_mlflow_uri()}")
    click.echo(f"Using {provider_type.upper()} with model: {model}")
    click.echo("\nType your questions and press Enter.")
    click.echo(f"Type {Command.EXIT.value} to exit.")
    click.echo("=" * 70)

    # Read-eval loop: runs until the user exits or interrupts.
    while True:
        try:
            user_input = click.prompt("\n🧑", prompt_suffix="").strip()
        except (KeyboardInterrupt, EOFError):
            click.echo("\nExiting chat session...")
            break

        # Slash commands may terminate the session or skip processing.
        action = _handle_special_commands(user_input)
        if action == "exit":
            break
        if action == "continue":
            continue

        # Hand the query to the async processing pipeline.
        asyncio.run(_process_user_query(user_input, provider_config, verbose))

version()

Show MLflow Assistant version information.

Source code in src/mlflow_assistant/cli/commands.py
@cli.command()
def version():
    """Show MLflow Assistant version information."""
    from mlflow_assistant import __version__

    click.echo(f"MLflow Assistant version: {__version__}")

    # Read the saved configuration and surface the key settings,
    # falling back to the "not configured" marker for missing values.
    config = load_config()
    provider_section = config.get(CONFIG_KEY_PROVIDER, {})

    mlflow_uri = config.get(
        CONFIG_KEY_MLFLOW_URI, DEFAULT_STATUS_NOT_CONFIGURED,
        )
    provider = provider_section.get(
        CONFIG_KEY_TYPE, DEFAULT_STATUS_NOT_CONFIGURED,
    )
    model = provider_section.get(
        CONFIG_KEY_MODEL, DEFAULT_STATUS_NOT_CONFIGURED,
    )

    click.echo(f"MLflow URI: {mlflow_uri}")
    click.echo(f"Provider: {provider}")
    click.echo(f"Model: {model}")

setup

Setup wizard for MLflow Assistant configuration.

This module provides an interactive setup wizard that guides users through configuring MLflow Assistant, including MLflow connection settings and AI provider configuration (OpenAI or Ollama).

setup_wizard()

Interactive setup wizard for mlflow-assistant.

Source code in src/mlflow_assistant/cli/setup.py
def setup_wizard():
    """Interactive setup wizard for mlflow-assistant.

    Walks the user through configuring the MLflow tracking URI and an AI
    provider (OpenAI, Ollama, or Databricks), then persists the result via
    save_config(). Aborts without saving if the user declines to continue
    past a failed connectivity check or a missing prerequisite.
    """
    click.echo("┌──────────────────────────────────────────────────────┐")
    click.echo("│             MLflow Assistant Setup Wizard            │")
    click.echo("└──────────────────────────────────────────────────────┘")

    click.echo("\nThis wizard will help you configure MLflow Assistant.")

    # Initialize config; previous provider is remembered so we can detect
    # a provider switch and suggest sensible default models below.
    config = load_config()
    previous_provider = config.get(
        CONFIG_KEY_PROVIDER, {}).get(CONFIG_KEY_TYPE)

    # MLflow URI (defaults to the previously saved value, if any)
    mlflow_uri = click.prompt(
        "Enter your MLflow URI",
        default=config.get(CONFIG_KEY_MLFLOW_URI, DEFAULT_MLFLOW_URI),
    )

    # Connectivity is advisory only: the user may proceed with an
    # unreachable URI after confirming.
    if not validate_mlflow_uri(mlflow_uri):
        click.echo("\n⚠️  Warning: Could not connect to MLflow URI.")
        click.echo(
            "    Please ensure MLflow is running.",
        )
        click.echo(
            "    Common MLflow URLs: http://localhost:5000, "
            "http://localhost:8080",
        )
        if not click.confirm(
            "Continue anyway? (Choose Yes if you're sure MLflow is running)",
        ):
            click.echo(
                "Setup aborted. "
                "Please ensure MLflow is running and try again.")
            return
        click.echo("Continuing with setup using the provided MLflow URI.")
    else:
        click.echo("✅ Successfully connected to MLflow!")

    config[CONFIG_KEY_MLFLOW_URI] = mlflow_uri

    # AI Provider selection, restricted to the Provider enum members.
    provider_options = [p.value.capitalize() for p in Provider]
    provider_choice = click.prompt(
        "\nWhich AI provider would you like to use?",
        type=click.Choice(provider_options, case_sensitive=False),
        default=config.get(CONFIG_KEY_PROVIDER, {})
        .get(CONFIG_KEY_TYPE, Provider.OPENAI.value)
        .capitalize(),
    )

    current_provider_type = provider_choice.lower()
    provider_config = {}

    # Check if provider is changing and handle default models
    provider_changed = (previous_provider and
                        previous_provider != current_provider_type)

    if current_provider_type == Provider.OPENAI.value:
        # If switching from another provider, show a message
        if provider_changed:
            click.echo("\n✅ Switching to OpenAI provider")

        # Initialize provider config
        provider_config = {
            CONFIG_KEY_TYPE: Provider.OPENAI.value,
            CONFIG_KEY_MODEL: Provider.get_default_model(
                Provider.OPENAI,
            ),  # Will be updated after user selection
        }

        # Check for OpenAI API key; the key itself is never stored in the
        # config file — it is read from the environment at run time.
        api_key = os.environ.get(OPENAI_API_KEY_ENV)
        if not api_key:
            click.echo(
                "\n⚠️  OpenAI API key not found in environment variables.",
            )
            click.echo(
                f"Please export your OpenAI API key as {OPENAI_API_KEY_ENV}.",
            )
            click.echo(f"Example: export {OPENAI_API_KEY_ENV}='your-key-here'")
            if not click.confirm("Continue without API key?"):
                click.echo(
                    "Setup aborted. Please set the API key and try again.",
                )
                return
        else:
            click.echo("✅ Found OpenAI API key in environment!")

        # Always ask for model choice
        model_choices = OpenAIModel.choices()

        # If changing providers, suggest the default,
        # otherwise use previous config
        if provider_changed:
            suggested_model = Provider.get_default_model(Provider.OPENAI)
        else:
            # Fall back to the default if the previously saved model is no
            # longer one of the offered choices.
            current_model = config.get(CONFIG_KEY_PROVIDER, {}).get(
                CONFIG_KEY_MODEL, Provider.get_default_model(Provider.OPENAI),
            )
            suggested_model = (
                current_model
                if current_model in model_choices
                else Provider.get_default_model(Provider.OPENAI)
            )

        model = click.prompt(
            "Choose an OpenAI model",
            type=click.Choice(model_choices, case_sensitive=False),
            default=suggested_model,
        )
        provider_config[CONFIG_KEY_MODEL] = model

    elif current_provider_type == Provider.OLLAMA.value:
        # If switching from another provider, automatically set defaults
        if provider_changed:
            click.echo(
                "\n✅ Switching to Ollama provider with default URI and model",
            )

        # Ollama configuration - always ask for URI
        ollama_uri = click.prompt(
            "\nEnter your Ollama server URI",
            default=config.get(CONFIG_KEY_PROVIDER, {}).get(
                CONFIG_KEY_URI, DEFAULT_OLLAMA_URI,
            ),
        )

        # Initialize provider config with default model and user-specified URI
        provider_config = {
            CONFIG_KEY_TYPE: Provider.OLLAMA.value,
            CONFIG_KEY_URI: ollama_uri,
            CONFIG_KEY_MODEL: Provider.get_default_model(
                Provider.OLLAMA,
            ),  # Will be updated if user selects a different model
        }

        # Check if Ollama is running; when it is, the server's installed
        # models are offered as choices.
        is_connected, ollama_data = validate_ollama_connection(ollama_uri)
        if is_connected:
            click.echo("✅ Ollama server is running!")

            # Get available models
            available_models = ollama_data.get("models", [])

            if available_models:
                click.echo(
                    f"\nAvailable Ollama models: {', '.join(available_models)}",
                )

                # If changing providers, suggest the default,
                # otherwise use previous config
                default_model = Provider.get_default_model(Provider.OLLAMA)
                if provider_changed:
                    # Prefer the provider default when installed; otherwise
                    # fall back to the first installed model.
                    suggested_model = (
                        default_model
                        if default_model in available_models
                        else available_models[0]
                    )
                else:
                    current_model = config.get(CONFIG_KEY_PROVIDER, {}).get(
                        CONFIG_KEY_MODEL,
                    )
                    suggested_model = (
                        current_model
                        if current_model in available_models
                        else default_model
                    )

                # Case-sensitive: Ollama model tags are exact identifiers.
                ollama_model = click.prompt(
                    "Choose an Ollama model",
                    type=click.Choice(available_models, case_sensitive=True),
                    default=suggested_model,
                )
                provider_config[CONFIG_KEY_MODEL] = ollama_model
            else:
                # Server reachable but reports no models: free-form prompt.
                click.echo("\nNo models found. Using default model.")
                ollama_model = click.prompt(
                    "Enter the Ollama model to use",
                    default=config.get(CONFIG_KEY_PROVIDER, {}).get(
                        CONFIG_KEY_MODEL, Provider.get_default_model(
                            Provider.OLLAMA,
                        ),
                    ),
                )
                provider_config[CONFIG_KEY_MODEL] = ollama_model
        else:
            click.echo(
                "\n⚠️  Warning: Ollama server not running or"
                " not accessible at this URI.",
            )
            if not click.confirm("Continue anyway?"):
                click.echo(
                    "Setup aborted. Please start Ollama server and try again.",
                )
                return

            # Still prompt for model name
            ollama_model = click.prompt(
                "Enter the Ollama model to use",
                default=config.get(CONFIG_KEY_PROVIDER, {}).get(
                    CONFIG_KEY_MODEL, Provider.get_default_model(
                        Provider.OLLAMA,
                    ),
                ),
            )
            provider_config[CONFIG_KEY_MODEL] = ollama_model

    elif current_provider_type == Provider.DATABRICKS.value:
        config_path = Path(DEFAULT_DATABRICKS_CONFIG_FILE).expanduser()
        # Verify Databricks configuration file path
        click.echo(f"Checking Databricks configuration file at: {config_path}")
        if not os.path.isfile(config_path):
            # File does not exist — abort; the wizard does not create it.
            click.echo(
                    "Setup aborted. Please setup Databricks config file and try again.",
                )
            return

        # Get Databricks configuration file
        config_string = Path(config_path).read_text()

        # Get profiles from the Databricks configuration file
        # Parse the config string
        databricks_config = configparser.ConfigParser()
        databricks_config.read_string(config_string)

        # Manually include DEFAULT section (configparser excludes it
        # from sections())
        all_sections = ['DEFAULT', *databricks_config.sections()]

        # Only profiles carrying a 'token' entry are usable.
        profile_options = [section for section in all_sections if 'token' in databricks_config[section]]

        if not profile_options:
            click.echo(
                "\n⚠️  No valid profiles found in Databricks configuration file.",
            )
            click.echo(
                "Please ensure your Databricks config file contains a profile with a 'token'.",
            )
            click.echo(
                "Setup aborted. Please fix the configuration and try again.",
            )
            return

        profile = click.prompt(
            "\nWhich databricks profile would you like to use?",
            type=click.Choice(profile_options, case_sensitive=False),
            default=profile_options[0],
        )

        # Prompt for model name (free-form; no discovery for Databricks)
        databricks_model = click.prompt(
            "Enter the Databricks model to use",
        )

        provider_config = {
            CONFIG_KEY_TYPE: Provider.DATABRICKS.value,
            CONFIG_KEY_PROFILE: profile,
            CONFIG_KEY_MODEL: databricks_model,
        }

    config[CONFIG_KEY_PROVIDER] = provider_config

    # Save the configuration
    save_config(config)

    click.echo("\n✅ Configuration saved successfully!")
    click.echo("\n┌──────────────────────────────────────────────────┐")
    click.echo("│               Getting Started                    │")
    click.echo("└──────────────────────────────────────────────────┘")
    click.echo(
        "\nYou can now use MLflow Assistant with the following commands:")
    click.echo(
        "  mlflow-assistant start     - Start an interactive chat "
        "session.",
    )
    click.echo(
        "  mlflow-assistant version   - Show version "
        "information.",
    )

    click.echo("\nFor more information, use 'mlflow-assistant --help'")

validation

Validation utilities for MLflow Assistant configuration.

This module provides validation functions to check MLflow connections, AI provider configurations, and overall system setup to ensure proper operation of MLflow Assistant.

validate_mlflow_uri(uri)

Validate MLflow URI by attempting to connect.

Parameters:

Name Type Description Default
uri str

MLflow server URI

required

Returns:

Name Type Description
bool bool

True if connection successful, False otherwise

Source code in src/mlflow_assistant/cli/validation.py
def validate_mlflow_uri(uri: str) -> bool:
    """Validate MLflow URI by attempting to connect.

    Probes each known validation endpoint in turn and succeeds on the
    first HTTP 200 response.

    Args:
        uri: MLflow server URI (a trailing slash is tolerated)

    Returns:
        bool: True if connection successful, False otherwise

    """
    # Normalize once: stripping the trailing slash is loop-invariant,
    # so hoist it out of the endpoint loop.
    clean_uri = uri.rstrip("/")

    for endpoint in MLFLOW_VALIDATION_ENDPOINTS:
        url = f"{clean_uri}{endpoint}"
        try:
            # Lazy %-style args avoid string formatting when DEBUG is off.
            logger.debug("Trying to connect to MLflow at: %s", url)

            response = requests.get(url, timeout=MLFLOW_CONNECTION_TIMEOUT)
            if response.status_code == 200:
                logger.info("Successfully connected to MLflow at %s", url)
                return True
            logger.debug("Response from %s: %s", url, response.status_code)
        except Exception as e:
            # Connection failures are expected while probing; try the next
            # endpoint rather than aborting.
            logger.debug("Failed to connect to %s: %s", endpoint, e)

    # If we get here, none of the endpoints worked
    logger.warning(
        "Could not validate MLflow at %s on any standard endpoint", uri,
    )
    return False

validate_ollama_connection(uri)

Validate Ollama connection and get available models.

Parameters:

Name Type Description Default
uri str

Ollama server URI

required

Returns:

Type Description
tuple[bool, dict[str, Any]]

Tuple[bool, Dict[str, Any]]: (is_valid, response_data)

Source code in src/mlflow_assistant/cli/validation.py
def validate_ollama_connection(uri: str) -> tuple[bool, dict[str, Any]]:
    """Validate Ollama connection and get available models.

    Args:
        uri: Ollama server URI

    Returns:
        Tuple[bool, Dict[str, Any]]: (is_valid, response_data)

    """
    # Probe the tags endpoint; any transport error means "not reachable".
    try:
        response = requests.get(
            f"{uri}{OLLAMA_TAGS_ENDPOINT}", timeout=OLLAMA_CONNECTION_TIMEOUT,
        )
        if response.status_code != 200:
            return False, {}

        # Server is up; model-list extraction is best-effort only.
        try:
            payload = response.json()
            names = [
                entry.get("name") for entry in payload.get("models", [])
            ]
        except Exception as e:
            logger.debug(f"Error parsing Ollama models: {e}")
            return True, {"models": []}
        return True, {"models": names}
    except Exception as e:
        logger.debug(f"Error connecting to Ollama: {e}")
        return False, {}

validate_setup(check_api_key=True)

Validate that MLflow Assistant is properly configured.

Parameters:

Name Type Description Default
check_api_key bool

Whether to check for API key if using OpenAI

True

Returns:

Type Description
tuple[bool, str]

Tuple[bool, str]: (is_valid, error_message)

Source code in src/mlflow_assistant/cli/validation.py
def validate_setup(check_api_key: bool = True) -> tuple[bool, str]:
    """Validate that MLflow Assistant is properly configured.

    Args:
        check_api_key: Whether to check for API key if using OpenAI

    Returns:
        Tuple[bool, str]: (is_valid, error_message)

    """
    # Guard clause: an MLflow URI must be configured first.
    if not get_mlflow_uri():
        return (
            False,
            "MLflow URI not configured. "
            "Run 'mlflow-assistant setup' first.",
        )

    # Guard clause: a provider with a type must be configured.
    provider_config = get_provider_config()
    provider_type = (provider_config or {}).get(CONFIG_KEY_TYPE)
    if not provider_type:
        return (
            False,
            "AI provider not configured. "
            "Run 'mlflow-assistant setup' first.",
        )

    # OpenAI additionally requires an API key, unless the caller opted out.
    missing_openai_key = (
        check_api_key
        and provider_type == Provider.OPENAI.value
        and not provider_config.get(CONFIG_KEY_API_KEY)
    )
    if missing_openai_key:
        return (
            False,
            f"OpenAI API key not found in environment. "
            f"Set {OPENAI_API_KEY_ENV}.",
        )

    return True, ""