course/videos/offset_mapping.ipynb

{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "This notebook regroups the code sample of the video below, which is a part of the [Hugging Face course](https://huggingface.co/course)." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "cellView": "form" }, "outputs": [ { "data": { "text/html": [ "<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/3umI3tm27Vw?rel=0&amp;controls=0&amp;showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>" ], "text/plain": [ "<IPython.core.display.HTML object>" ] }, "execution_count": null, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#@title\n", "from IPython.display import HTML\n", "\n", "HTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/3umI3tm27Vw?rel=0&amp;controls=0&amp;showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Install the Transformers and Datasets libraries to run this notebook." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "! pip install datasets transformers[sentencepiece]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer\n", "\n", "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n", "print(tokenizer(\"Let's talk about tokenizers superpowers.\")[\"input_ids\"])\n", "print(tokenizer(\"Let's talk about tokenizers superpowers.\")[\"input_ids\"])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "encoding = tokenizer(\"Let's talk about tokenizers superpowers.\")\n", "print(encoding.tokens())\n", "print(encoding.word_ids())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "encoding = tokenizer(\n", " \"Let's talk about tokenizers superpowers.\",\n", " return_offsets_mapping=True\n", ")\n", "print(encoding.tokens())\n", "print(encoding[\"offset_mapping\"])" ] } ], "metadata": { "colab": { "name": "Fast tokenizer superpowers", "provenance": [] } }, "nbformat": 4, "nbformat_minor": 4 }