4 changes: 4 additions & 0 deletions README.md
@@ -25,6 +25,10 @@ MLOS is a project to enable autotuning for systems.
- [Installation](#installation)
- [See Also](#see-also)
- [Examples](#examples)
- [MLOS Viz Panel](#mlos-viz-panel)
- [Usage](#usage)
- [Running the Backend](#running-the-backend)
- [Running the Frontend](#running-the-frontend)

<!-- /TOC -->

16 changes: 12 additions & 4 deletions mlos_bench/mlos_bench/optimizers/base_optimizer.py
@@ -76,7 +76,7 @@ def __init__(self,
# if True (default), use the already initialized values for the first iteration.
self._start_with_defaults: bool = bool(
strtobool(str(self._config.pop('start_with_defaults', True))))
self._max_iter = int(self._config.pop('max_suggestions', 100))
self._max_iter = int(self._config.pop('max_suggestions', 5000))

opt_targets: Dict[str, str] = self._config.pop('optimization_targets', {'score': 'min'})
self._opt_targets: Dict[str, Literal[1, -1]] = {}
@@ -242,16 +242,24 @@ def bulk_register(self,
is_not_empty : bool
True if there is data to register, false otherwise.
"""
_LOG.info("Update the optimizer with: %d configs, %d scores, %d status values",
len(configs or []), len(scores or []), len(status or []))
_LOG.info(
"Update the optimizer with: %d configs, %d scores, %d status values",
len(configs or []), len(scores or []), len(status or [])
)

if len(configs or []) != len(scores or []):
raise ValueError("Numbers of configs and scores do not match.")
if status is not None and len(configs or []) != len(status or []):
raise ValueError("Numbers of configs and status values do not match.")

has_data = bool(configs and scores)
if has_data and self._start_with_defaults:
if len(configs or []) == 1:
_LOG.info("Only one configuration provided, using defaults.")
self._start_with_defaults = True
elif has_data and self._start_with_defaults:
_LOG.info("Prior data exists - do *NOT* use the default initialization.")
self._start_with_defaults = False

return has_data

def suggest(self) -> TunableGroups:
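To make the revised early-exit logic concrete, here is a minimal standalone sketch (not the `mlos_bench` API; the function name and data shapes are illustrative) of the bookkeeping `bulk_register` now performs: a history of exactly one config re-enables the default-initialization path, while a longer history disables it.

```python
# Standalone sketch of the revised bulk_register flag handling (illustrative only).
from typing import Dict, Optional, Sequence


def resolve_start_with_defaults(configs: Sequence[dict],
                                scores: Sequence[Optional[Dict[str, float]]],
                                start_with_defaults: bool) -> bool:
    if len(configs or []) != len(scores or []):
        raise ValueError("Numbers of configs and scores do not match.")
    has_data = bool(configs and scores)
    if len(configs or []) == 1:
        return True    # Too little prior data: start from the defaults again.
    if has_data and start_with_defaults:
        return False   # Prior data exists: skip the default initialization.
    return start_with_defaults


assert resolve_start_with_defaults([{"a": 1}], [{"score": 0.5}], False)
assert not resolve_start_with_defaults(
    [{"a": 1}, {"a": 2}], [{"score": 0.5}, {"score": 0.7}], True)
```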
21 changes: 18 additions & 3 deletions mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py
@@ -92,9 +92,9 @@ def name(self) -> str:
return f"{self.__class__.__name__}:{self._opt.__class__.__name__}"

def bulk_register(self,
configs: Sequence[dict],
scores: Sequence[Optional[Dict[str, TunableValue]]],
status: Optional[Sequence[Status]] = None) -> bool:
configs: Sequence[dict],
scores: Sequence[Optional[Dict[str, TunableValue]]],
status: Optional[Sequence[Status]] = None) -> bool:

if not super().bulk_register(configs, scores, status):
return False
@@ -104,6 +104,9 @@ def bulk_register(self,
df_scores = self._adjust_signs_df(
pd.DataFrame([{} if score is None else score for score in scores]))

# Convert all score columns to numeric, coercing errors to NaN
df_scores = df_scores.apply(pd.to_numeric, errors='coerce')

opt_targets = list(self._opt_targets)
if status is not None:
# Select only the completed trials, set scores for failed trials to +inf.
@@ -125,6 +128,7 @@

return True


def _adjust_signs_df(self, df_scores: pd.DataFrame) -> pd.DataFrame:
"""
Adjust the signs of the scores in place for a MINIMIZATION problem.
@@ -202,7 +206,18 @@ def get_best_observation(self) -> Union[Tuple[Dict[str, float], TunableGroups],
(df_config, df_score, _df_context) = self._opt.get_best_observations()
if len(df_config) == 0:
return (None, None)

params = configspace_data_to_tunable_values(df_config.iloc[0].to_dict())
scores = self._adjust_signs_df(df_score).iloc[0].to_dict()

# Check for NaN values in the optimization targets and replace them with 0.
for target in self._opt_targets:
    if target not in scores:
        _LOG.warning("'%s' not found in the scores.", target)
    elif pd.isna(scores[target]):
        _LOG.warning("'%s' is NaN in the best observation. Setting it to 0.", target)
        scores[target] = 0

_LOG.debug("Best observation: %s score: %s", params, scores)
return (scores, self._tunables.copy().assign(params))
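The two additions above — `pd.to_numeric(..., errors='coerce')` in `bulk_register` and the NaN fallback in `get_best_observation` — compose as in this standalone sketch (the column names are invented for the example):

```python
# Coerce non-numeric score columns to NaN, then zero-fill NaN targets.
import pandas as pd

df_scores = pd.DataFrame({"score": ["1.5", "oops", None],
                          "latency": [10, 20, 30]})
df_scores = df_scores.apply(pd.to_numeric, errors="coerce")
# df_scores["score"] is now [1.5, NaN, NaN]

best = df_scores.iloc[1].to_dict()
for target in ("score", "latency"):
    if target in best and pd.isna(best[target]):
        best[target] = 0   # Mirrors the NaN fallback in get_best_observation.
print(best)  # {'score': 0, 'latency': 20}
```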
2 changes: 1 addition & 1 deletion mlos_bench/mlos_bench/storage/sql/schema.py
@@ -47,7 +47,7 @@ class DbSchema:
# pylint: disable=too-many-instance-attributes

# Common string column sizes.
_ID_LEN = 512
_ID_LEN = 256
_PARAM_VALUE_LEN = 1024
_METRIC_VALUE_LEN = 255
_STATUS_LEN = 16
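The `_ID_LEN` constant caps the declared width of identifier columns, so shrinking it shrinks every VARCHAR declared through it. As an illustration (a hypothetical table, not the actual MLOS schema), this is how such a constant typically flows into a SQLAlchemy column definition:

```python
# Hypothetical table showing how a shared length constant bounds ID columns.
from sqlalchemy import Column, MetaData, String, Table

_ID_LEN = 256  # The new, shorter cap from this change.

metadata = MetaData()
experiments_demo = Table(
    "experiments_demo", metadata,
    # Identifier columns are declared through _ID_LEN.
    Column("exp_id", String(_ID_LEN), primary_key=True),
)
```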
@@ -7,6 +7,6 @@
"experiment_id": "MyExperimentName",
"config_id": 1,
"trial_id": 1,
"max_trials": 100
"max_trials": 200
}
}
@@ -35,7 +35,7 @@ def __init__(self, *,  # pylint: disable=too-many-locals,too-many-arguments
seed: Optional[int] = 0,
run_name: Optional[str] = None,
output_directory: Optional[str] = None,
max_trials: int = 100,
max_trials: int = 5000,
n_random_init: Optional[int] = None,
max_ratio: Optional[float] = None,
use_default_config: bool = False,
6 changes: 6 additions & 0 deletions mlos_vizpanel/.streamlit/config.toml
@@ -0,0 +1,6 @@
[theme]
primaryColor = "#FF8C00"              # Amber for better visibility of sliders and other interactive elements
backgroundColor = "#FFFFFF"           # White page background
secondaryBackgroundColor = "#ADD8F6"  # Light Azure blue for the sidebar
textColor = "#0078D4"                 # Azure blue text
font = "Segoe UI"                     # Microsoft's standard font
53 changes: 53 additions & 0 deletions mlos_vizpanel/README.md
@@ -0,0 +1,53 @@
# MLOS Viz Panel

1. Set up Azure credentials for OpenAI and Azure Compute:
- Create an `azure_openai_credentials.json` file with the following structure:
```json
{
"azure_endpoint": "<your_azure_endpoint>",
"api_key": "<your_api_key>",
"api_version": "<api_version>"
}
```
- Ensure you have configured Azure credentials for the `ComputeManagementClient` to access VM SKUs.

- Create `global_config_storage.jsonc`:
```json
{
"host": "x.mysql.database.azure.com",
"username": "mlos",
"password": "x",
"database": "x"
}
```

- Create `global_config_azure.json`:
```json
{
"subscription": "x",
"tenant": "x",
"storageAccountKey": "x"
}
```

2. Set up the necessary configuration files in the `config/` directory for your environment.

## Usage

### Running the Backend

1. Navigate to the project directory.
2. Start the FastAPI server:
```bash
uvicorn backend:app --reload
```
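Once the server is up, a quick way to check that it is responding is to query the `/experiments` endpoint. A minimal smoke test using the `requests` package, assuming the default uvicorn address of `127.0.0.1:8000`:

```python
# Smoke test against the local backend (default host/port assumed).
import requests

resp = requests.get("http://127.0.0.1:8000/experiments")
resp.raise_for_status()
print(resp.json())  # Experiment IDs known to the storage backend.
```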

### Running the Frontend

1. Navigate to the project directory.
2. Start the Streamlit application:
```bash
streamlit run frontend.py
```
124 changes: 124 additions & 0 deletions mlos_vizpanel/backend.py
@@ -0,0 +1,124 @@
import json
import logging
from pathlib import Path

import pandas as pd
from fastapi import FastAPI, HTTPException
from openai import AzureOpenAI
from pydantic import BaseModel

from mlos_bench.storage import from_config

app = FastAPI()

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load global configuration
base_dir = Path(__file__).resolve().parent
global_config_path = base_dir / 'global_config_azure.json'
with global_config_path.open() as f:
global_config = json.load(f)
subscription_id = global_config['subscription']

# Load the storage config and connect to the storage
storage_config_path = "config/storage/mlos-mysql-db.jsonc"
try:
storage = from_config(config_file=storage_config_path)
except Exception as e:
    # HTTPException belongs in request handlers; fail fast at startup instead.
    raise RuntimeError(f"Error loading storage configuration: {e}") from e

@app.get("/experiments")
def get_experiments():
return list(storage.experiments.keys())

@app.get("/experiment_results/{experiment_id}")
def get_experiment_results(experiment_id: str):
try:
exp = storage.experiments[experiment_id]
return exp.results_df.to_dict(orient="records")
except KeyError:
raise HTTPException(status_code=404, detail="Experiment not found")

def count_categorical_values(df: pd.DataFrame) -> str:
categorical_counts = {}
for col in df.select_dtypes(include=['object', 'category']).columns:
counts = df[col].value_counts().to_dict()
categorical_counts[col] = counts

count_str = "Categorical Counts:\n"
for col, counts in categorical_counts.items():
count_str += f"{col}:\n"
for value, count in counts.items():
count_str += f" {value}: {count}\n"

return count_str

# Load credentials from the JSON file
with open('azure_openai_credentials.json', 'r') as file:
credentials = json.load(file)

# Try to create the AzureOpenAI client
try:
client = AzureOpenAI(
azure_endpoint=credentials['azure_endpoint'],
api_key=credentials['api_key'],
api_version=credentials['api_version']
)
except Exception as e:
    logger.error("Error creating AzureOpenAI client: %s", e)


class ExperimentExplanationRequest(BaseModel):
    experiment_id: str

@app.post("/get_experiment_explanation")
def get_experiment_explanation(request: ExperimentExplanationRequest):
experiment_id = request.experiment_id
try:
exp = storage.experiments[experiment_id]
# Take only the last 10 rows to keep the prompt small
df = exp.results_df.tail(10)
experiment_data = df.to_dict(orient='records')

df_head = exp.results_df.head(10)
experiment_data_head = df_head.to_dict(orient='records')

df_des = exp.results_df.describe()
experiment_data_des = df_des.to_dict(orient='index')  # Keep the stat names (count, mean, ...).

count_str = count_categorical_values(df)

prompt = f"Explain the following experiment data: First 10 rows {experiment_data_head} last 10 {experiment_data} & descriptive stats {experiment_data_des} & categorical vars counts {count_str}. Give me params to complement config. params present in the data. Also explain what each param does and params for MySQL config that would complement what we have and can boost preformance if tuned. Explain which are dangreous to tune as it might fail the server. Also talk about parameters that are safe to tune. Talk about each in list format so that you are listing all information relevant to a param under its name"

response = client.chat.completions.create(
    model="gpt4o",  # The Azure OpenAI deployment name.
    messages=[{"role": "user", "content": prompt}],
    max_tokens=1000,
)

explanation = response.choices[0].message.content.strip()
print(explanation)
return {"explanation": explanation}
except KeyError:
raise HTTPException(status_code=404, detail="Experiment not found")

if __name__ == "__main__":
import uvicorn
uvicorn.run("backend:app", host="0.0.0.0", port=8000, reload=True)  # reload requires an import string
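For reference, a hypothetical client call against the `/get_experiment_explanation` endpoint defined above (the experiment ID is a placeholder and the default host/port is assumed):

```python
# Hypothetical client for the explanation endpoint.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/get_experiment_explanation",
    json={"experiment_id": "MyExperimentName"},  # Placeholder experiment ID.
)
if resp.status_code == 404:
    print("Experiment not found")
else:
    resp.raise_for_status()
    print(resp.json()["explanation"])
```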