# /// script
# requires-python = ">=3.10"
# dependencies = [
# "marimo",
# "datasets",
# "huggingface-hub",
# ]
# ///
"""
Getting Started with Hugging Face Datasets
This marimo notebook works in two modes:
- Interactive: uvx marimo edit --sandbox getting-started.py
- Script: uv run getting-started.py --dataset squad
Same file, two experiences.
"""
# Create the marimo app object; each cell below registers itself via @app.cell.
import marimo
app = marimo.App(width="medium")
@app.cell
def _():
    # Import marimo inside a cell so downstream cells receive `mo` through
    # marimo's dataflow graph (the returned tuple is the cell's outputs).
    import marimo as mo
    return (mo,)
@app.cell
def _(mo):
    # Render the notebook title and usage instructions as markdown.
    mo.md(
        """
# Getting Started with Hugging Face Datasets
This notebook shows how to load and explore datasets from the Hugging Face Hub.
**Run this notebook:**
- Interactive: `uvx marimo edit --sandbox getting-started.py`
- As a script: `uv run getting-started.py --dataset squad`
"""
    )
    return
@app.cell
def _(mo):
    # Section header for the configuration step (markdown-only cell).
    mo.md(
        """
## Step 1: Configure
Choose which dataset to load. In interactive mode, use the controls below.
In script mode, pass `--dataset` argument.
"""
    )
    return
@app.cell
def _(mo):
    import argparse
    # Parse CLI args (works in both modes)
    # parse_known_args() tolerates marimo's own CLI flags in interactive mode
    # instead of erroring on unrecognized arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="stanfordnlp/imdb")
    parser.add_argument("--split", default="train")
    parser.add_argument("--samples", type=int, default=5)
    args, _ = parser.parse_known_args()
    # Interactive controls (only shown in notebook mode)
    # CLI values seed the widgets so both modes start from the same configuration.
    dataset_input = mo.ui.text(value=args.dataset, label="Dataset")
    split_input = mo.ui.dropdown(["train", "test", "validation"], value=args.split, label="Split")
    samples_input = mo.ui.slider(1, 20, value=args.samples, label="Samples")
    # Last expression is the cell's visual output: the three controls in a row.
    mo.hstack([dataset_input, split_input, samples_input])
    return args, argparse, dataset_input, parser, samples_input, split_input
@app.cell
def _(args, dataset_input, mo, samples_input, split_input):
    # Use interactive values if available, otherwise CLI args
    # (`or` falls back to the CLI value when a widget yields an empty/None value;
    # the slider's minimum of 1 means its value is never falsy).
    dataset_name = dataset_input.value or args.dataset
    split_name = split_input.value or args.split
    num_samples = samples_input.value or args.samples
    print(f"Dataset: {dataset_name}, Split: {split_name}, Samples: {num_samples}")
    return dataset_name, num_samples, split_name
@app.cell
def _(mo):
    # Section header for the loading step. NOTE: the previous text claimed the
    # data is "streamed" with no download, but the load cell below calls
    # load_dataset() without streaming=True, which downloads and caches the
    # full split — the markdown now describes what the code actually does.
    mo.md(
        """
## Step 2: Load Dataset
We use the `datasets` library to download the requested split from the Hub
and cache it locally. (Pass `streaming=True` to `load_dataset` if you want
to iterate without downloading the whole dataset first.)
"""
    )
    return
@app.cell
def _(dataset_name, split_name):
    # Download (and cache) the requested split, then report its size and schema.
    from datasets import load_dataset
    print(f"Loading {dataset_name}...")
    dataset = load_dataset(dataset_name, split=split_name)
    print(f"Loaded {len(dataset):,} rows")
    print(f"Features: {list(dataset.features.keys())}")
    return dataset, load_dataset
@app.cell
def _(mo):
    # Section header for the exploration step (markdown-only cell).
    mo.md(
        """
## Step 3: Explore the Data
Let's look at a few samples from the dataset.
"""
    )
    return
@app.cell
def _(dataset, mo, num_samples):
    # Take the first `num_samples` rows (clamped to the dataset size) and show
    # them both as plain text (script mode) and as an interactive table
    # (notebook mode).
    samples = dataset.select(range(min(num_samples, len(dataset))))
    df = samples.to_pandas()

    def _truncate(value, limit=200):
        # Shorten long values for display. Stringify once (the original lambda
        # called str() twice per value); values at or under the limit pass
        # through unchanged, preserving non-string cells as-is.
        text = str(value)
        return text[:limit] + "..." if len(text) > limit else value

    # Only object (string-like) columns can hold long free text worth truncating.
    for col in df.select_dtypes(include=["object"]).columns:
        df[col] = df[col].apply(_truncate)
    print(df.to_string())  # Shows in script mode
    mo.ui.table(df)  # Shows in interactive mode
    return df, samples
@app.cell
def _(mo):
    # Closing markdown cell with pointers to further experiments.
    mo.md(
        """
## Next Steps
- Try different datasets: `squad`, `emotion`, `wikitext`
- Run on HF Jobs: `hf jobs uv run --flavor cpu-basic ... getting-started.py`
- Check out more UV scripts at [uv-scripts](https://huggingface.co/uv-scripts)
"""
    )
    return
# Script-mode entry point: `uv run getting-started.py ...` executes every
# cell in dependency order; in interactive mode marimo drives the app itself.
if __name__ == "__main__":
    app.run()