help@rskworld.in +91 93305 39277
RSK World
  • Home
  • Development
    • Web Development
    • Mobile Apps
    • Software
    • Games
    • Project
  • Technologies
    • Data Science
    • AI Development
    • Cloud Development
    • Blockchain
    • Cyber Security
    • Dev Tools
    • Testing Tools
  • About
  • Contact

Theme Settings

Color Scheme
Display Options
Font Size
100%
Back to Project
RSK World
dask-parallel
/
notebooks
RSK World
dask-parallel
Parallel and distributed computing with Dask
notebooks
  • 01_dask_arrays.ipynb — 4.2 KB
  • 02_dask_dataframes.ipynb — 5 KB
  • 03_delayed_computations.ipynb — 5.2 KB
  • 04_distributed_computing.ipynb — 4.8 KB
  • 05_task_scheduling.ipynb — 5.4 KB
  • 06_dask_bags.ipynb — 5.3 KB
  • 07_advanced_dataframes.ipynb — 6.7 KB
  • 08_dask_ml.ipynb — 7.2 KB
08_dask_ml.ipynb
notebooks/08_dask_ml.ipynb
Raw Download
Find: Go to:
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "# Machine Learning with Dask\n",
        "\n",
        "<!--\n",
        "Project: Dask Parallel Computing\n",
        "Author: Molla Samser\n",
        "Designer & Tester: Rima Khatun\n",
        "Website: https://rskworld.in\n",
        "Email: help@rskworld.in, support@rskworld.in\n",
        "Phone: +91 93305 39277\n",
        "-->\n",
        "\n",
        "This notebook demonstrates machine learning with Dask, including parallel model training, hyperparameter tuning, and large-scale data preprocessing.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "import dask.array as da\n",
        "import dask.dataframe as dd\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "from sklearn.datasets import make_classification\n",
        "from sklearn.model_selection import train_test_split\n",
        "import time\n",
        "\n",
        "# Note: dask-ml is optional, using sklearn with Dask arrays for demonstration\n",
        "print(\"Machine Learning with Dask\")\n",
        "print(\"=\" * 50)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Creating Large ML Dataset\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Build a synthetic multi-class dataset large enough to be worth chunking.\n",
        "print(\"Generating large dataset...\")\n",
        "\n",
        "# Sizing knobs for the synthetic dataset and the Dask chunking below.\n",
        "N_SAMPLES, N_FEATURES, CHUNK_ROWS = 100000, 50, 10000\n",
        "\n",
        "X, y = make_classification(\n",
        "    n_samples=N_SAMPLES,\n",
        "    n_features=N_FEATURES,\n",
        "    n_informative=30,\n",
        "    n_redundant=10,\n",
        "    n_classes=3,\n",
        "    random_state=42\n",
        ")\n",
        "\n",
        "# Wrap the in-memory NumPy arrays as Dask arrays split into row chunks,\n",
        "# so later reductions can run one chunk at a time in parallel.\n",
        "X_dask = da.from_array(X, chunks=(CHUNK_ROWS, N_FEATURES))\n",
        "y_dask = da.from_array(y, chunks=CHUNK_ROWS)\n",
        "\n",
        "print(f\"Dataset shape: {X_dask.shape}\")\n",
        "print(f\"Number of chunks: {X_dask.numblocks}\")\n",
        "print(f\"Classes: {np.unique(y)}\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Parallel Data Preprocessing\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "import dask\n",
        "\n",
        "# Standardize features in parallel.\n",
        "# NOTE(review): the original cell imported StandardScaler and delayed here\n",
        "# but never used either; the dead imports were removed.\n",
        "print(\"Standardizing features...\")\n",
        "start_time = time.time()\n",
        "\n",
        "# Build both reductions lazily and evaluate them in a single dask.compute\n",
        "# call, so the array is traversed once instead of twice (the original\n",
        "# issued two separate .compute() calls, one per statistic).\n",
        "mean, std = dask.compute(X_dask.mean(axis=0), X_dask.std(axis=0))\n",
        "\n",
        "# Normalize lazily; the epsilon guards against division by zero for\n",
        "# constant (zero-variance) features.\n",
        "X_normalized = (X_dask - mean) / (std + 1e-8)\n",
        "\n",
        "# Compute a sample to verify\n",
        "sample = X_normalized[:1000, :].compute()\n",
        "\n",
        "end_time = time.time()\n",
        "\n",
        "print(f\"Normalization completed in {end_time - start_time:.2f} seconds\")\n",
        "print(f\"Sample mean: {sample.mean():.6f}\")\n",
        "print(f\"Sample std: {sample.std():.6f}\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Parallel Model Training\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "from sklearn.ensemble import RandomForestClassifier\n",
        "from sklearn.linear_model import LogisticRegression\n",
        "from dask import delayed, compute\n",
        "\n",
        "# Hold out 20% of the data for evaluation.\n",
        "X_train, X_test, y_train, y_test = train_test_split(\n",
        "    X, y, test_size=0.2, random_state=42\n",
        ")\n",
        "\n",
        "# NOTE(review): the original cell also wrapped the splits in Dask arrays\n",
        "# (X_train_dask / X_test_dask) but never used them -- the models below\n",
        "# train on the in-memory NumPy arrays. The dead conversions were removed.\n",
        "\n",
        "@delayed\n",
        "def train_model(model_class, X, y):\n",
        "    \"\"\"Lazily fit a fresh instance of model_class on (X, y) and return it.\"\"\"\n",
        "    model = model_class()\n",
        "    model.fit(X, y)\n",
        "    return model\n",
        "\n",
        "@delayed\n",
        "def evaluate_model(model, X, y):\n",
        "    \"\"\"Lazily score a fitted model on held-out (X, y).\"\"\"\n",
        "    return model.score(X, y)\n",
        "\n",
        "# Build the two train+evaluate graphs, then run them in one compute()\n",
        "# call so both models fit and score in parallel.\n",
        "print(\"Training models in parallel...\")\n",
        "start_time = time.time()\n",
        "\n",
        "rf_model = train_model(RandomForestClassifier, X_train, y_train)\n",
        "lr_model = train_model(LogisticRegression, X_train, y_train)\n",
        "\n",
        "rf_score = evaluate_model(rf_model, X_test, y_test)\n",
        "lr_score = evaluate_model(lr_model, X_test, y_test)\n",
        "\n",
        "rf_score_val, lr_score_val = compute(rf_score, lr_score)\n",
        "\n",
        "end_time = time.time()\n",
        "\n",
        "print(f\"Training completed in {end_time - start_time:.2f} seconds\")\n",
        "print(f\"Random Forest Score: {rf_score_val:.4f}\")\n",
        "print(f\"Logistic Regression Score: {lr_score_val:.4f}\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Hyperparameter Tuning\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "from sklearn.model_selection import GridSearchCV\n",
        "from dask.distributed import Client, LocalCluster\n",
        "\n",
        "# Spin up a local cluster so its dashboard can be watched while tuning.\n",
        "# NOTE(review): GridSearchCV(n_jobs=-1) parallelizes through joblib's\n",
        "# default local backend, NOT through this Dask cluster -- to actually\n",
        "# route the search onto the cluster, wrap fit() in\n",
        "# joblib.parallel_backend(\"dask\"). Flagged rather than changed here.\n",
        "cluster = LocalCluster(n_workers=4, threads_per_worker=1)\n",
        "client = Client(cluster)\n",
        "\n",
        "# try/finally guarantees the cluster is torn down even if the grid\n",
        "# search raises (the original leaked the workers on failure).\n",
        "try:\n",
        "    print(f\"Cluster Dashboard: {client.dashboard_link}\")\n",
        "\n",
        "    # 3 x 3 = 9 parameter combinations, each cross-validated 3 ways.\n",
        "    param_grid = {\n",
        "        'n_estimators': [50, 100, 200],\n",
        "        'max_depth': [10, 20, None]\n",
        "    }\n",
        "\n",
        "    # Use a smaller subset so the 27 fits stay quick for demonstration.\n",
        "    X_small, y_small = X[:10000], y[:10000]\n",
        "\n",
        "    print(\"\\nPerforming grid search...\")\n",
        "    start_time = time.time()\n",
        "\n",
        "    grid_search = GridSearchCV(\n",
        "        RandomForestClassifier(random_state=42),\n",
        "        param_grid,\n",
        "        cv=3,\n",
        "        n_jobs=-1,\n",
        "        verbose=1\n",
        "    )\n",
        "\n",
        "    grid_search.fit(X_small, y_small)\n",
        "\n",
        "    end_time = time.time()\n",
        "\n",
        "    print(f\"\\nGrid search completed in {end_time - start_time:.2f} seconds\")\n",
        "    print(f\"Best parameters: {grid_search.best_params_}\")\n",
        "    print(f\"Best score: {grid_search.best_score_:.4f}\")\n",
        "finally:\n",
        "    # Always release cluster resources.\n",
        "    client.close()\n",
        "    cluster.close()\n"
      ]
    }
  ],
  "metadata": {
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2
}
231 lines•7.2 KB
json

About RSK World

Founded by Molla Samser, with Designer & Tester Rima Khatun, RSK World is your one-stop destination for free programming resources, source code, and development tools.

Founder: Molla Samser
Designer & Tester: Rima Khatun

Development

  • Game Development
  • Web Development
  • Mobile Development
  • AI Development
  • Development Tools

Legal

  • Terms & Conditions
  • Privacy Policy
  • Disclaimer

Contact Info

Nutanhat, Mongolkote
Purba Burdwan, West Bengal
India, 713147

+91 93305 39277

help@rskworld.in
support@rskworld.in

© 2026 RSK World. All rights reserved.

Content used for educational purposes only. View Disclaimer