This shows you the differences between two versions of the page.
| Next revision | Previous revision | ||
| wiki:ai:cli-workspace-deployment-upload [2025/06/03 17:35] – created ddehamer | wiki:ai:cli-workspace-deployment-upload [2025/06/04 15:51] (current) – ddehamer | ||
|---|---|---|---|
| Line 1: | Line 1: | ||
| ====== CLI ML Workspace Transcript ====== | ====== CLI ML Workspace Transcript ====== | ||
| - | With errors | + | This turned out to take way longer than expected due to ChatGPT getting into a loop and breaking one thing to fix another |
| + | |||
| + | ===== Working Scripts ===== | ||
| + | |||
| + | train.py | ||
| + | < | ||
| + | # | ||
| + | |||
| + | import pandas as pd | ||
| + | import numpy as np | ||
| + | from sklearn.model_selection import train_test_split | ||
| + | from sklearn.preprocessing import OneHotEncoder | ||
| + | from sklearn.linear_model import Ridge | ||
| + | from sklearn.metrics import mean_squared_error | ||
| + | from sklearn.compose import ColumnTransformer | ||
| + | from sklearn.pipeline import Pipeline | ||
| + | import joblib | ||
| + | |||
| + | # Load data | ||
| + | df = pd.read_csv(" | ||
| + | |||
| + | # Features and target | ||
| + | features = [" | ||
| + | target = " | ||
| + | |||
| + | X = df[features] | ||
| + | y = df[target] | ||
| + | |||
| + | # Train/test split | ||
| + | X_train, X_test, y_train, y_test = train_test_split(X, | ||
| + | |||
| + | # Preprocessing | ||
| + | categorical_features = [" | ||
| + | numeric_features = [" | ||
| + | |||
| + | preprocessor = ColumnTransformer( | ||
| + | transformers=[ | ||
| + | (" | ||
| + | (" | ||
| + | ] | ||
| + | ) | ||
| + | |||
| + | # Build pipeline | ||
| + | model = Pipeline(steps=[ | ||
| + | (" | ||
| + | (" | ||
| + | ]) | ||
| + | |||
| + | # Train | ||
| + | model.fit(X_train, | ||
| + | |||
| + | # Evaluate | ||
| + | y_pred = model.predict(X_test) | ||
| + | rmse = np.sqrt(mean_squared_error(y_test, | ||
| + | print(f" | ||
| + | |||
| + | # Save model | ||
| + | joblib.dump(model, | ||
| + | </ | ||
| + | |||
| + | env.yml | ||
| + | < | ||
| + | name: collectibles-env | ||
| + | channels: | ||
| + | - conda-forge | ||
| + | - defaults | ||
| + | dependencies: | ||
| + | - python=3.9 | ||
| + | - pip | ||
| + | - pip: | ||
| + | - numpy==1.26.4 | ||
| + | - pandas==2.2.2 | ||
| + | - scikit-learn==1.6.1 | ||
| + | - joblib | ||
| + | - azureml-inference-server-http | ||
| + | </ | ||
| + | |||
| + | score.py | ||
| + | < | ||
| + | # | ||
| + | import json | ||
| + | import os | ||
| + | import joblib | ||
| + | import pandas as pd | ||
| + | |||
| + | model = None | ||
| + | |||
| + | def init(): | ||
| + | global model | ||
| + | model_path = os.path.join(os.getenv(" | ||
| + | model = joblib.load(model_path) | ||
| + | |||
| + | def run(request): | ||
| + | try: | ||
| + | # Azure ML sends the request body as a string; parse it | ||
| + | data = json.loads(request) | ||
| + | |||
| + | # Ensure we're dealing with a list of records | ||
| + | if isinstance(data, | ||
| + | df = pd.DataFrame(data) | ||
| + | elif isinstance(data, | ||
| + | df = pd.DataFrame(data[" | ||
| + | else: | ||
| + | return json.dumps({" | ||
| + | |||
| + | predictions = model.predict(df) | ||
| + | return json.dumps(predictions.tolist()) | ||
| + | except Exception as e: | ||
| + | return json.dumps({" | ||
| + | </ | ||
| + | |||
| + | deploy_to_azure_clean.py | ||
| + | < | ||
| + | # | ||
| + | |||
| + | from azure.identity import DefaultAzureCredential | ||
| + | from azure.ai.ml import MLClient | ||
| + | from azure.ai.ml.entities import ( | ||
| + | ManagedOnlineEndpoint, | ||
| + | ManagedOnlineDeployment, | ||
| + | Model, | ||
| + | Environment, | ||
| + | CodeConfiguration | ||
| + | ) | ||
| + | import uuid | ||
| + | |||
| + | # Set your Azure environment details | ||
| + | subscription_id = " | ||
| + | resource_group = " | ||
| + | workspace_name = " | ||
| + | |||
| + | # Connect to Azure ML workspace | ||
| + | ml_client = MLClient( | ||
| + | DefaultAzureCredential(), | ||
| + | subscription_id, | ||
| + | resource_group, | ||
| + | workspace_name | ||
| + | ) | ||
| + | |||
| + | # Register the model | ||
| + | model = Model( | ||
| + | path=" | ||
| + | name=" | ||
| + | description=" | ||
| + | type=" | ||
| + | ) | ||
| + | registered_model = ml_client.models.create_or_update(model) | ||
| + | |||
| + | # Create the environment | ||
| + | env = Environment( | ||
| + | name=" | ||
| + | description=" | ||
| + | image=" | ||
| + | conda_file=" | ||
| + | ) | ||
| + | ml_client.environments.create_or_update(env) | ||
| + | |||
| + | # Generate a unique endpoint name | ||
| + | endpoint_name = f" | ||
| + | |||
| + | # Create the endpoint | ||
| + | endpoint = ManagedOnlineEndpoint( | ||
| + | name=endpoint_name, | ||
| + | description=" | ||
| + | auth_mode=" | ||
| + | ) | ||
| + | ml_client.begin_create_or_update(endpoint).result() | ||
| + | |||
| + | # Deploy the model | ||
| + | deployment = ManagedOnlineDeployment( | ||
| + | name=" | ||
| + | endpoint_name=endpoint_name, | ||
| + | model=registered_model, | ||
| + | environment=env, | ||
| + | code_configuration=CodeConfiguration( | ||
| + | code=" | ||
| + | scoring_script=" | ||
| + | ), | ||
| + | instance_type=" | ||
| + | instance_count=1 | ||
| + | ) | ||
| + | ml_client.begin_create_or_update(deployment).result() | ||
| + | |||
| + | # Set default deployment | ||
| + | existing_endpoint = ml_client.online_endpoints.get(name=endpoint_name) | ||
| + | existing_endpoint.defaults = {" | ||
| + | ml_client.begin_create_or_update(existing_endpoint).result() | ||
| + | |||
| + | print(f" | ||
| + | </ | ||
| + | |||
| + | test_endpoint.py | ||
| + | < | ||
| + | # | ||
| + | |||
| + | import requests | ||
| + | import json | ||
| + | |||
| + | # Replace this with your actual endpoint URL | ||
| + | endpoint = " | ||
| + | api_key = "< | ||
| + | |||
| + | headers = { | ||
| + | " | ||
| + | " | ||
| + | } | ||
| + | |||
| + | data = [ | ||
| + | { | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | " | ||
| + | } | ||
| + | ] | ||
| + | |||
| + | response = requests.post(endpoint, | ||
| + | |||
| + | print(" | ||
| + | print(response.text) | ||
| + | </ | ||
| + | |||
| + | ===== Important Observations and Commands ===== | ||
| + | |||
| + | - After successful deployment you have to gather 3 pieces of information. | ||
| + | - The endpoint name. This will be shown during deployment but will also be listed in endpoints under ML. | ||
| + | - The endpoint URL. This will be shown under the endpoint in the portal. | ||
+ | - The access key. Gotten by running an az CLI command while logged in within a terminal. | ||
| + | - i.e. az ml online-endpoint get-credentials --name collectibles-endpoint-11ee416b --resource-group don-test-rg-SCUS --workspace-name don-ml-workspace-fixed | ||
+ | - You will then need to set up the endpoint to take traffic with another az command. Without this you will get an error saying it can't accept requests. | ||
| + | - i.e. az ml online-endpoint update --workspace don-ml-workspace-fixed --name collectibles-endpoint-11ee416b --resource-group don-test-rg-SCUS --traffic blue=100 | ||
| + | - What is in the score.py to define what data format is in use has to match the test script, which ChatGPT had a hard time with. | ||
| + | - You have to be on nothing newer than Python3.9 due to incompatibilities with anything newer. | ||
+ | - On Macs you have a controlled Python install if you've done it through homebrew so you need to install and use pipx to create the venv to use for this install/ | ||
| + | - pipx install requests | ||
| + | - pipx inject requests azure-ai-textanalytics azure-core opencensus-ext-azure opencensus-ext-requests opencensus azure-cognitiveservices-speech openai opencensus-ext-azure fastapi uvicorn joblib scikit-learn pandas --include-deps | ||
| + | - This will put all the modules in the same directory so you won't have to mess with missing modules. | ||
| + | - You would use / | ||
| + | - This would be the first line (# | ||
| + | |||
| + | ===== Transcript of steps taken to make AI create scripts ===== | ||
| In **Azure Machine Learning (Azure ML)**, datasets used for training can be in a variety of formats. The platform is flexible, but depending on how you plan to use the dataset (e.g., for AutoML, custom training, or pipelines), certain formats are more appropriate. | In **Azure Machine Learning (Azure ML)**, datasets used for training can be in a variety of formats. The platform is flexible, but depending on how you plan to use the dataset (e.g., for AutoML, custom training, or pipelines), certain formats are more appropriate. | ||
| - | ==== ✅ Commonly Supported Formats | + | === ✅ Commonly Supported Formats === |
| ^ Format ^ Description ^ Notes ^ | ^ Format ^ Description ^ Notes ^ | ||
| Line 30: | Line 277: | ||
| ---- | ---- | ||
| - | ==== ✅ Where to Store ==== | + | === ✅ Where to Store === |
| Datasets must be accessible to Azure ML compute. Typical storage options: | Datasets must be accessible to Azure ML compute. Typical storage options: | ||
| Line 53: | Line 300: | ||
| dataset = Dataset.Tabular.from_delimited_files(path=(datastore, | dataset = Dataset.Tabular.from_delimited_files(path=(datastore, | ||
| dataset = dataset.register(workspace=ws, | dataset = dataset.register(workspace=ws, | ||
| - | |||
| </ | </ | ||
| Line 68: | Line 314: | ||
| ---- | ---- | ||
| - | ==== ✅ Option 1: Export a Table to CSV Using SQL (INTO OUTFILE) | + | === ✅ Option 1: Export a Table to CSV Using SQL (INTO OUTFILE) === |
| <code -> | <code -> | ||
| Line 77: | Line 323: | ||
| LINES TERMINATED BY ' | LINES TERMINATED BY ' | ||
| FROM my_table; | FROM my_table; | ||
| - | |||
| </ | </ | ||
| Line 88: | Line 333: | ||
| ---- | ---- | ||
| - | ==== ✅ Option 2: Use mysql CLI and sed/awk (CSV-like output) | + | === ✅ Option 2: Use mysql CLI and sed/awk (CSV-like output) === |
| <code -> | <code -> | ||
| bashCopyEditmysql -u username -p -e " | bashCopyEditmysql -u username -p -e " | ||
| | sed ' | | sed ' | ||
| - | |||
| </ | </ | ||
| Line 100: | Line 344: | ||
| ---- | ---- | ||
| - | ==== ✅ Option 3: Python Script to Dump Entire Database to CSV Files (per Table) | + | === ✅ Option 3: Python Script to Dump Entire Database to CSV Files (per Table) === |
| If you want a full database export (one CSV per table): | If you want a full database export (one CSV per table): | ||
| Line 121: | Line 365: | ||
| conn.close() | conn.close() | ||
| - | |||
| </ | </ | ||
| ---- | ---- | ||
| - | ==== 🔐 Permissions Reminder | + | === 🔐 Permissions Reminder === |
| If you're getting errors with '' | If you're getting errors with '' | ||
| Line 132: | Line 375: | ||
| * The **secure_file_priv** system variable:< | * The **secure_file_priv** system variable:< | ||
| sqlCopyEditSHOW VARIABLES LIKE ' | sqlCopyEditSHOW VARIABLES LIKE ' | ||
| - | |||
| </ | </ | ||
| * The MySQL user has '' | * The MySQL user has '' | ||
| sqlCopyEditGRANT FILE ON *.* TO ' | sqlCopyEditGRANT FILE ON *.* TO ' | ||
| - | |||
| </ | </ | ||
| Line 151: | Line 392: | ||
| ---- | ---- | ||
| - | ===== ✅ 1. Via Azure ML Studio (Web UI) ===== | + | === ✅ 1. Via Azure ML Studio (Web UI) === |
| - | ==== 📍 Steps: | + | == 📍 Steps: == |
| - Go to Azure ML Studio | - Go to Azure ML Studio | ||
| Line 168: | Line 409: | ||
| ---- | ---- | ||
| - | ===== ✅ 2. Via Python SDK ===== | + | === ✅ 2. Via Python SDK === |
| - | ==== 🔧 Example: Upload a CSV as a tabular dataset | + | == 🔧 Example: Upload a CSV as a tabular dataset == |
| <code -> | <code -> | ||
| Line 195: | Line 436: | ||
| create_new_version=True | create_new_version=True | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 202: | Line 442: | ||
| ---- | ---- | ||
| - | ===== ✅ 3. Via Azure CLI ===== | + | === ✅ 3. Via Azure CLI === |
| <code -> | <code -> | ||
| Line 210: | Line 450: | ||
| --workspace-name my-ml-workspace \ | --workspace-name my-ml-workspace \ | ||
| --resource-group my-rg | --resource-group my-rg | ||
| - | |||
| </ | </ | ||
| Line 217: | Line 456: | ||
| ---- | ---- | ||
| - | ===== ✅ 4. From Azure Blob Storage or External URL ===== | + | === ✅ 4. From Azure Blob Storage or External URL === |
| If your data is already in Azure Blob: | If your data is already in Azure Blob: | ||
| Line 229: | Line 468: | ||
| dataset = Dataset.Tabular.from_delimited_files(path=(datastore, | dataset = Dataset.Tabular.from_delimited_files(path=(datastore, | ||
| dataset.register(workspace=ws, | dataset.register(workspace=ws, | ||
| - | |||
| </ | </ | ||
| Line 236: | Line 474: | ||
| <code -> | <code -> | ||
| pythonCopyEditdataset = Dataset.Tabular.from_delimited_files(" | pythonCopyEditdataset = Dataset.Tabular.from_delimited_files(" | ||
| - | |||
| </ | </ | ||
| ---- | ---- | ||
| - | ===== 🔄 Upload Types ===== | + | === 🔄 Upload Types === |
| ^ Type ^ Use Case ^ | ^ Type ^ Use Case ^ | ||
| Line 271: | Line 508: | ||
| ---- | ---- | ||
| - | ===== ✅ Goal ===== | + | === ✅ Goal === |
| You want to **input a character name** and retrieve **associated details** from your dataset. This suggests: | You want to **input a character name** and retrieve **associated details** from your dataset. This suggests: | ||
| Line 280: | Line 517: | ||
| ---- | ---- | ||
| - | ===== ✅ Step-by-Step Plan (Using Azure ML + OpenAI or Embeddings) | + | === ✅ Step-by-Step Plan (Using Azure ML + OpenAI or Embeddings) === |
| - | ==== Step 1: Understand Your Data ==== | + | == Step 1: Understand Your Data == |
| I'll examine your dataset. | I'll examine your dataset. | ||
| Line 298: | Line 535: | ||
| ---- | ---- | ||
| - | ===== ✅ ML-Based Query System Options | + | === ✅ ML-Based Query System Options === |
| Since you're aiming to **query by character** and get full details, you have two main paths: | Since you're aiming to **query by character** and get full details, you have two main paths: | ||
| Line 304: | Line 541: | ||
| ---- | ---- | ||
| - | ==== 🔹 Option 1: Traditional Filter-Based Query ==== | + | == 🔹 Option 1: Traditional Filter-Based Query == |
| No ML needed — just build a UI or API to filter rows where '' | No ML needed — just build a UI or API to filter rows where '' | ||
| Line 312: | Line 549: | ||
| ---- | ---- | ||
| - | ==== 🔹 Option 2: AI-Powered Search (Semantic or Fuzzy Matching) | + | == 🔹 Option 2: AI-Powered Search (Semantic or Fuzzy Matching) == |
| Use **OpenAI embeddings** + **Azure ML** to let the user type: | Use **OpenAI embeddings** + **Azure ML** to let the user type: | ||
| Line 320: | Line 557: | ||
| **Steps:** | **Steps:** | ||
| - | === Step 1: Generate Embeddings for Each Row === | + | = Step 1: Generate Embeddings for Each Row = |
| Use OpenAI' | Use OpenAI' | ||
| Line 326: | Line 563: | ||
| <code -> | <code -> | ||
| pythonCopyEditrow_text = f" | pythonCopyEditrow_text = f" | ||
| - | |||
| </ | </ | ||
| Line 335: | Line 571: | ||
| ---- | ---- | ||
| - | ===== ✅ Recommendation | + | === ✅ Recommendation === |
| Would you prefer: | Would you prefer: | ||
| Line 345: | Line 581: | ||
| I can generate code or deploy scripts for either path. Which would you like? | I can generate code or deploy scripts for either path. Which would you like? | ||
| - | == You said: == | + | = You said: = |
I would like to use ML for this. Do you have suggestions for some example that would be more suited to using ML with this dataset? | I would like to use ML for this. Do you have suggestions for some example that would be more suited to using ML with this dataset? | ||
| Line 357: | Line 593: | ||
| ---- | ---- | ||
| - | ===== ✅ 1. Price Estimation (Regression) | + | === ✅ 1. Price Estimation (Regression) === |
| - | ==== 📌 Problem: | + | == 📌 Problem: == |
| Predict '' | Predict '' | ||
| - | ==== ✅ Why it fits: ==== | + | == ✅ Why it fits: == |
| * You have labeled numeric data. | * You have labeled numeric data. | ||
| * It's useful for appraising unlisted items. | * It's useful for appraising unlisted items. | ||
| - | ==== 🔧 ML Type: ==== | + | == 🔧 ML Type: == |
| * **Regression** (e.g., Linear Regression, XGBoost, AutoML) | * **Regression** (e.g., Linear Regression, XGBoost, AutoML) | ||
| Line 374: | Line 610: | ||
| ---- | ---- | ||
| - | ===== ✅ 2. Category Prediction (Classification) | + | === ✅ 2. Category Prediction (Classification) === |
| - | ==== 📌 Problem: | + | == 📌 Problem: == |
| Predict the '' | Predict the '' | ||
| - | ==== ✅ Why it fits: ==== | + | == ✅ Why it fits: == |
| * Helps auto-label new entries. | * Helps auto-label new entries. | ||
| * Enables predictive organization/ | * Enables predictive organization/ | ||
| - | ==== 🔧 ML Type: ==== | + | == 🔧 ML Type: == |
| * **Classification** | * **Classification** | ||
| Line 391: | Line 627: | ||
| ---- | ---- | ||
| - | ===== ✅ 3. Clustering Characters by Similarity (Unsupervised) | + | === ✅ 3. Clustering Characters by Similarity (Unsupervised) === |
| - | ==== 📌 Problem: | + | == 📌 Problem: == |
| Group similar characters or figures automatically to find patterns or suggest groupings. | Group similar characters or figures automatically to find patterns or suggest groupings. | ||
| - | ==== ✅ Why it fits: ==== | + | == ✅ Why it fits: == |
| * Explores hidden structure. | * Explores hidden structure. | ||
| * Could be visualized or used to build recommendation systems. | * Could be visualized or used to build recommendation systems. | ||
| - | ==== 🔧 ML Type: ==== | + | == 🔧 ML Type: == |
| * **Clustering (e.g., KMeans)** using embeddings of text descriptions. | * **Clustering (e.g., KMeans)** using embeddings of text descriptions. | ||
| Line 408: | Line 644: | ||
| ---- | ---- | ||
| - | ===== ✅ 4. Anomaly Detection | + | === ✅ 4. Anomaly Detection === |
| - | ==== 📌 Problem: | + | == 📌 Problem: == |
| Find collectibles that are priced much higher/ | Find collectibles that are priced much higher/ | ||
| - | ==== ✅ Why it fits: ==== | + | == ✅ Why it fits: == |
| * Helps find mispriced entries or rare items. | * Helps find mispriced entries or rare items. | ||
| - | ==== 🔧 ML Type: ==== | + | == 🔧 ML Type: == |
| * **Anomaly Detection (Isolation Forest, One-Class SVM)** | * **Anomaly Detection (Isolation Forest, One-Class SVM)** | ||
| Line 424: | Line 660: | ||
| ---- | ---- | ||
| - | ===== ✅ 5. Embedding-Based Semantic Search (LLM-enhanced ML) ===== | + | === ✅ 5. Embedding-Based Semantic Search (LLM-enhanced ML) === |
| - | ==== 📌 Problem: | + | == 📌 Problem: == |
| Let users search using natural language and get intelligent matches. | Let users search using natural language and get intelligent matches. | ||
| - | ==== ✅ Why it fits: ==== | + | == ✅ Why it fits: == |
| * Transforms character/ | * Transforms character/ | ||
| * Allows vector similarity search over the dataset. | * Allows vector similarity search over the dataset. | ||
| - | ==== 🔧 Tools: | + | == 🔧 Tools: == |
| * Azure OpenAI (for embeddings) | * Azure OpenAI (for embeddings) | ||
| Line 443: | Line 679: | ||
| ---- | ---- | ||
| - | ===== 🧠 Suggestion | + | === 🧠 Suggestion === |
| To get started with a real ML training task: | To get started with a real ML training task: | ||
| - | ==== ▶️ Start with Price Estimation using Regression: | + | == ▶️ Start with Price Estimation using Regression: == |
| * **Input**: '' | * **Input**: '' | ||
| Line 454: | Line 690: | ||
| Would you like me to generate a full training script (e.g., in Python with scikit-learn or in Azure ML SDK) for this? | Would you like me to generate a full training script (e.g., in Python with scikit-learn or in Azure ML SDK) for this? | ||
| - | == You said: == | + | = You said: = |
| Yes | Yes | ||
| Line 462: | Line 698: | ||
| Your regression model has been successfully trained to predict '' | Your regression model has been successfully trained to predict '' | ||
| - | ==== ✅ Root Mean Squared Error (RMSE): ~30.15 | + | == ✅ Root Mean Squared Error (RMSE): ~30.15 == |
| This means that, on average, the model' | This means that, on average, the model' | ||
| Line 468: | Line 704: | ||
| ---- | ---- | ||
| - | ===== 🧪 Next Steps You Can Take ===== | + | === 🧪 Next Steps You Can Take === |
| - | ==== 🔧 Improvements | + | == 🔧 Improvements == |
| * Use more advanced models (e.g., **XGBoost**, | * Use more advanced models (e.g., **XGBoost**, | ||
| Line 525: | Line 761: | ||
| bashCopyEditpip install fastapi uvicorn joblib scikit-learn pandas | bashCopyEditpip install fastapi uvicorn joblib scikit-learn pandas | ||
| uvicorn deploy:app --reload | uvicorn deploy:app --reload | ||
| - | |||
| </ | </ | ||
| Line 532: | Line 767: | ||
| <code -> | <code -> | ||
| bashCopyEditpython test_inference.py | bashCopyEditpython test_inference.py | ||
| - | |||
| </ | </ | ||
| Line 539: | Line 773: | ||
| <code -> | <code -> | ||
| jsonCopyEdit{" | jsonCopyEdit{" | ||
| - | |||
| </ | </ | ||
| Line 611: | Line 844: | ||
| request_file=" | request_file=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 657: | Line 889: | ||
| - **Install Azure ML SDK**< | - **Install Azure ML SDK**< | ||
| bashCopyEditpip install azure-ai-ml azure-identity | bashCopyEditpip install azure-ai-ml azure-identity | ||
| - | |||
| </ | </ | ||
| - **Set up Azure CLI & Login**< | - **Set up Azure CLI & Login**< | ||
| bashCopyEditaz login | bashCopyEditaz login | ||
| - | |||
| </ | </ | ||
| - **Set the following in the script**: | - **Set the following in the script**: | ||
| Line 678: | Line 908: | ||
| <code -> | <code -> | ||
| bashCopyEditpython deploy_to_azureml.py | bashCopyEditpython deploy_to_azureml.py | ||
| - | |||
| </ | </ | ||
| Line 704: | Line 933: | ||
| print(response) | print(response) | ||
| - | |||
| </ | </ | ||
| Line 711: | Line 939: | ||
| == You said: == | == You said: == | ||
| - | Got an error during deployment: Traceback (most recent call last): File "/ | + | Got an error during deployment: |
| - | + | ||
| - | | + | |
| Thanks for the detailed error — this happens because the model was registered as '' | Thanks for the detailed error — this happens because the model was registered as '' | ||
| Line 732: | Line 958: | ||
| type=" | type=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 744: | Line 969: | ||
| type=" | type=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 754: | Line 978: | ||
| * '' | * '' | ||
| - | ---- | + | New Error: |
| - | + | ||
| - | Would you like me to regenerate the corrected script and upload it again for convenience? | + | |
| - | + | ||
| - | == You said: == | + | |
| - | + | ||
| - | New error: File "/ | + | |
| - | + | ||
| - | | + | |
| Thanks for the trace. The error means that the environment definition is missing **either**: | Thanks for the trace. The error means that the environment definition is missing **either**: | ||
| Line 784: | Line 1000: | ||
| description=" | description=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 801: | Line 1016: | ||
| conda_file=" | conda_file=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 812: | Line 1026: | ||
| description=" | description=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 819: | Line 1032: | ||
| Would you like me to regenerate the **fully corrected deployment script** ('' | Would you like me to regenerate the **fully corrected deployment script** ('' | ||
| - | == You said: == | + | New Error: |
| - | + | ||
| - | New error: Traceback (most recent call last): File "/ | + | |
| - | + | ||
| - | | + | |
| Thanks again — you’re using the **Azure ML SDK v2**, where '' | Thanks again — you’re using the **Azure ML SDK v2**, where '' | ||
| Line 842: | Line 1051: | ||
| conda_file=" | conda_file=" | ||
| ) | ) | ||
| - | |||
| </ | </ | ||
| Line 860: | Line 1068: | ||
| Let me know! | Let me know! | ||
| - | == You said: == | + | New Error: |
| - | + | ||
| - | New error: Traceback (most recent call last): File "/ | + | |
| - | + | ||
| - | | + | |
| This error indicates that the **Azure ML endpoint creation is referencing a non-existent Azure Container Registry (ACR)**: | This error indicates that the **Azure ML endpoint creation is referencing a non-existent Azure Container Registry (ACR)**: | ||
| Line 910: | Line 1114: | ||
| pythonCopyEditworkspace_name = " | pythonCopyEditworkspace_name = " | ||
| resource_group = " | resource_group = " | ||
| - | |||
| </ | </ | ||
| Line 961: | Line 1164: | ||
| --resource-group don-test-rg-SCUS \ | --resource-group don-test-rg-SCUS \ | ||
| --query container_registry | --query container_registry | ||
| - | |||
| </ | </ | ||
| Line 968: | Line 1170: | ||
| <code -> | <code -> | ||
| jsonCopyEdit"/ | jsonCopyEdit"/ | ||
| - | |||
| </ | </ | ||
| Line 981: | Line 1182: | ||
| <code -> | <code -> | ||
| bashCopyEditaz acr list --resource-group don-test-rg-SCUS --output table | bashCopyEditaz acr list --resource-group don-test-rg-SCUS --output table | ||
| - | |||
| </ | </ | ||
| Line 1003: | Line 1203: | ||
| --resource-group don-test-rg-SCUS \ | --resource-group don-test-rg-SCUS \ | ||
| --location southcentralus | --location southcentralus | ||
| - | |||
| </ | </ | ||
| Line 1025: | Line 1224: | ||
| <code -> | <code -> | ||
| bashCopyEditMicrosoft.ContainerRegistry/ | bashCopyEditMicrosoft.ContainerRegistry/ | ||
| - | |||
| </ | </ | ||
| Line 1032: | Line 1230: | ||
| * **Deleted**, | * **Deleted**, | ||
| * **Inaccessible** (e.g., region mismatch, permission issue, or soft-deleted) | * **Inaccessible** (e.g., region mismatch, permission issue, or soft-deleted) | ||
| - | |||
| - | ---- | ||
| ===== ✅ Next Steps to Resolve ===== | ===== ✅ Next Steps to Resolve ===== | ||
| Line 1044: | Line 1240: | ||
| bashCopyEditaz acr show \ | bashCopyEditaz acr show \ | ||
| --ids "/ | --ids "/ | ||
| - | |||
| </ | </ | ||
| Line 1060: | Line 1255: | ||
| --resource-group don-test-rg-SCUS \ | --resource-group don-test-rg-SCUS \ | ||
| --location southcentralus | --location southcentralus | ||
| - | |||
| </ | </ | ||
| Line 1078: | Line 1272: | ||
| pythonCopyEditworkspace_name = " | pythonCopyEditworkspace_name = " | ||
| resource_group = " | resource_group = " | ||
| - | |||
| </ | </ | ||
| Line 1091: | Line 1284: | ||
| - Create a new ACR:< | - Create a new ACR:< | ||
| bashCopyEditaz acr create --name mynewacr123 --resource-group don-test-rg-SCUS --sku Basic | bashCopyEditaz acr create --name mynewacr123 --resource-group don-test-rg-SCUS --sku Basic | ||
| - | |||
| </ | </ | ||
| - Manually patch the ML workspace to use it (involves ARM template or REST call — fragile). | - Manually patch the ML workspace to use it (involves ARM template or REST call — fragile). | ||
| Line 1124: | Line 1316: | ||
| <code -> | <code -> | ||
| bashCopyEdit.../ | bashCopyEdit.../ | ||
| - | |||
| </ | </ | ||
| Line 1131: | Line 1322: | ||
| <code -> | <code -> | ||
| bashCopyEdit1bc3bfe4a5644d918283c13025dc9d8c | bashCopyEdit1bc3bfe4a5644d918283c13025dc9d8c | ||
| - | |||
| </ | </ | ||
| Line 1142: | Line 1332: | ||
| --name 1bc3bfe4a5644d918283c13025dc9d8c \ | --name 1bc3bfe4a5644d918283c13025dc9d8c \ | ||
| --resource-group don-test-rg-SCUS | --resource-group don-test-rg-SCUS | ||
| - | |||
| </ | </ | ||
| Line 1152: | Line 1341: | ||
| * **If it doesn’t**: | * **If it doesn’t**: | ||
| bashCopyEdit(ResourceNotFound) The Resource ' | bashCopyEdit(ResourceNotFound) The Resource ' | ||
| - | |||
| </ | </ | ||
| Line 1159: | Line 1347: | ||
| == You said: == | == You said: == | ||
| - | az acr show \ --name 1bc3bfe4a5644d918283c13025dc9d8c \ --resource-group don-test-rg-SCUS (ResourceNotFound) The Resource ' | + | az acr show \ --name 1bc3bfe4a5644d918283c13025dc9d8c \ --resource-group don-test-rg-SCUS (ResourceNotFound) The Resource ' |
| | | ||
| Thanks — this confirms that the Azure Container Registry (ACR) linked to your ML workspace has been **deleted** or is **permanently unavailable**, | Thanks — this confirms that the Azure Container Registry (ACR) linked to your ML workspace has been **deleted** or is **permanently unavailable**, | ||
| - | |||
| - | ---- | ||
| ===== ✅ Final Fix: Create a New Workspace (Auto-links a Valid ACR) ===== | ===== ✅ Final Fix: Create a New Workspace (Auto-links a Valid ACR) ===== | ||
| Line 1178: | Line 1364: | ||
| --resource-group don-test-rg-SCUS \ | --resource-group don-test-rg-SCUS \ | ||
| --location southcentralus | --location southcentralus | ||
| - | |||
| </ | </ | ||
| Line 1192: | Line 1377: | ||
| pythonCopyEditworkspace_name = " | pythonCopyEditworkspace_name = " | ||
| resource_group = " | resource_group = " | ||
| - | |||
| </ | </ | ||
| Line 1199: | Line 1383: | ||
| <code -> | <code -> | ||
| bashCopyEditpython deploy_to_azureml.py | bashCopyEditpython deploy_to_azureml.py | ||
| - | |||
| </ | </ | ||
| Line 1215: | Line 1398: | ||
| Would you like me to repackage the **deployment script** and **endpoint test script** with the new workspace values set for convenience? | Would you like me to repackage the **deployment script** and **endpoint test script** with the new workspace values set for convenience? | ||
| - | == You said: == | + | New Error with new environment |
| - | + | ||
| - | New error with new environment: Traceback (most recent call last): File "/ | ||