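# Inline script metadata (PEP 723) so `uv run` and `uvx marimo edit --sandbox`
# can resolve this file's dependencies on their own. A minimal sketch: the
# Python floor and the unpinned versions below are assumptions; pin as needed.
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "marimo",
#     "datasets",
# ]
# ///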
""" |
|
|
Getting Started with Hugging Face Datasets |
|
|
|
|
|
This marimo notebook works in two modes: |
|
|
- Interactive: uvx marimo edit --sandbox getting-started.py |
|
|
- Script: uv run getting-started.py --dataset squad |
|
|
|
|
|
Same file, two experiences. |
|
|
""" |
|
|
|
|
|
import marimo |
|
|
|
|
|
app = marimo.App(width="medium") |
|
|
|
|
|
|
|
|
@app.cell |
|
|
def _(): |
|
|
import marimo as mo |
|
|
return (mo,) |
|
|
|
|
|
|
|
|
@app.cell |
|
|
def _(mo): |
    mo.md(
        """
        # Getting Started with Hugging Face Datasets

        This notebook shows how to load and explore datasets from the Hugging Face Hub.

        **Run this notebook:**
        - Interactive: `uvx marimo edit --sandbox getting-started.py`
        - As a script: `uv run getting-started.py --dataset squad`
        """
    )
    return


@app.cell
def _(mo):
    mo.md(
        """
        ## Step 1: Configure

        Choose which dataset to load. In interactive mode, use the controls below.
        In script mode, pass the `--dataset`, `--split`, and `--samples` flags.
        """
    )
    return


@app.cell
def _(mo):
    import argparse

    # parse_known_args() ignores any flags that belong to marimo itself, so
    # this cell works both under `uv run` and inside `marimo edit`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="stanfordnlp/imdb")
    parser.add_argument("--split", default="train")
    parser.add_argument("--samples", type=int, default=5)
    args, _ = parser.parse_known_args()

    # Seed the UI controls with the CLI values so both modes share one config
    # path. Note: --split must be one of the dropdown options below.
    dataset_input = mo.ui.text(value=args.dataset, label="Dataset")
    split_input = mo.ui.dropdown(
        ["train", "test", "validation"], value=args.split, label="Split"
    )
    samples_input = mo.ui.slider(1, 20, value=args.samples, label="Samples")

    mo.hstack([dataset_input, split_input, samples_input])
    return args, argparse, dataset_input, parser, samples_input, split_input


@app.cell
def _(args, dataset_input, mo, samples_input, split_input):
    # CLI args provide the defaults; UI values win once the controls are used.
    dataset_name = dataset_input.value or args.dataset
    split_name = split_input.value or args.split
    num_samples = samples_input.value or args.samples

    print(f"Dataset: {dataset_name}, Split: {split_name}, Samples: {num_samples}")
    return dataset_name, num_samples, split_name


@app.cell
def _(mo):
    mo.md(
        """
        ## Step 2: Load Dataset

        We use the `datasets` library to download and cache the requested split
        from the Hub. For very large datasets you can stream records instead of
        downloading everything up front; see the aside after the next cell.
        """
    )
    return


@app.cell
def _(dataset_name, split_name):
    from datasets import load_dataset

    print(f"Loading {dataset_name}...")
    # Downloads (and caches) the requested split, then memory-maps it locally.
    dataset = load_dataset(dataset_name, split=split_name)
    print(f"Loaded {len(dataset):,} rows")
    print(f"Features: {list(dataset.features.keys())}")
    return dataset, load_dataset
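

@app.cell
def _(mo):
    mo.md(
        """
        **Aside: streaming.** The cell above loads the split eagerly, because
        the exploration below needs `len()` and `.select()`, which streaming
        datasets do not support. For very large datasets, `streaming=True`
        returns an `IterableDataset` that reads records lazily. A sketch, not
        wired into the rest of this notebook:

        ```python
        stream = load_dataset(dataset_name, split=split_name, streaming=True)
        first_rows = list(stream.take(5))  # fetches only the first 5 records
        ```
        """
    )
    return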


@app.cell
def _(mo):
    mo.md(
        """
        ## Step 3: Explore the Data

        Let's look at a few samples from the dataset.
        """
    )
    return


@app.cell
def _(dataset, mo, num_samples):
    # Take the first N rows (bounded by the dataset size) and view as a DataFrame.
    samples = dataset.select(range(min(num_samples, len(dataset))))
    df = samples.to_pandas()

    # Truncate long text fields so the table stays readable.
    for col in df.select_dtypes(include=["object"]).columns:
        df[col] = df[col].apply(
            lambda x: str(x)[:200] + "..." if len(str(x)) > 200 else x
        )

    print(df.to_string())  # script-mode output
    mo.ui.table(df)  # interactive-mode output
    return df, samples
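

@app.cell
def _(df):
    # One more exploration idea: class balance across the sampled rows. The
    # "label" column is an assumption (it exists in the default
    # stanfordnlp/imdb); the guard skips datasets without one.
    if "label" in df.columns:
        print(df["label"].value_counts())
    return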


@app.cell
def _(mo):
    mo.md(
        """
        ## Next Steps

        - Try different datasets: `squad`, `emotion`, `wikitext`
        - Run on HF Jobs: `hf jobs uv run --flavor cpu-basic ... getting-started.py`
        - Check out more UV scripts at [uv-scripts](https://huggingface.co/uv-scripts)
        """
    )
    return
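

# Example invocations in script mode (dataset names are Hub IDs; their
# availability on the Hub is assumed):
#
#   uv run getting-started.py --dataset squad --split validation --samples 3
#   uv run getting-started.py --samples 10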


if __name__ == "__main__":
    app.run()