{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook regroups the code sample of the video below, which is a part of the [Hugging Face course](https://huggingface.co/course)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form"
},
"outputs": [
{
"data": {
"text/html": [
"<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/AUozVp78dhk?rel=0&controls=0&showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#@title\n",
"from IPython.display import HTML\n",
"\n",
"HTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/AUozVp78dhk?rel=0&controls=0&showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Install the Transformers and Datasets libraries to run this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"! pip install datasets transformers[sentencepiece]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook regroups the code sample of the video below, which is a part of the [Hugging Face course](https://huggingface.co/course)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form"
},
"outputs": [
{
"data": {
"text/html": [
"<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/alq1l8Lv9GA?rel=0&controls=0&showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#@title\n",
"from IPython.display import HTML\n",
"\n",
"HTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/alq1l8Lv9GA?rel=0&controls=0&showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_dataset\n",
"from transformers import AutoTokenizer\n",
"import numpy as np\n",
"\n",
"raw_datasets = load_dataset(\"glue\", \"mrpc\")\n",
"checkpoint = \"bert-base-uncased\"\n",
"tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
"\n",
"def tokenize_dataset(dataset):\n",
" encoded = tokenizer(\n",
" dataset[\"sentence1\"],\n",
" dataset[\"sentence2\"],\n",
" max_length=128,\n",
" truncation=True,\n",
" )\n",
" return encoded.data\n",
"\n",
"tokenized_datasets = raw_datasets.map(tokenize_dataset, batched=True)\n",
"\n",
"train_dataset = tokenized_datasets[\"train\"].to_tf_dataset(\n",
" columns=[\"input_ids\", \"attention_mask\", \"token_type_ids\"],\n",
" label_cols=[\"label\"],\n",
" shuffle=True,\n",
" batch_size=8)\n",
"\n",
"validation_dataset = tokenized_datasets[\"validation\"].to_tf_dataset(\n",
" columns=[\"input_ids\", \"attention_mask\", \"token_type_ids\"],\n",
" label_cols=[\"label\"],\n",
" shuffle=True,\n",
" batch_size=8)"
]
},
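{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that nothing above pads the tokenized sentences, so batches built by `to_tf_dataset` can contain sequences of different lengths. A minimal sketch of a more robust setup (reusing the `tokenizer` and `tokenized_datasets` defined above) is to pass a `DataCollatorWithPadding` as the `collate_fn`, so each batch is padded dynamically to its longest member:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import DataCollatorWithPadding\n",
"\n",
"# Pad every batch to the length of its longest sequence, returning TF tensors\n",
"data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors=\"tf\")\n",
"\n",
"train_dataset = tokenized_datasets[\"train\"].to_tf_dataset(\n",
"    columns=[\"input_ids\", \"attention_mask\", \"token_type_ids\"],\n",
"    label_cols=[\"label\"],\n",
"    shuffle=True,\n",
"    collate_fn=data_collator,\n",
"    batch_size=8,\n",
")"
]
},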
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"next(iter(train_dataset))[1]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"from transformers import TFAutoModelForSequenceClassification\n",
"\n",
"checkpoint = 'bert-base-cased'\n",
"model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)\n",
"loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
"model.compile(optimizer='adam', loss=loss)"
]
},
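{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `optimizer=\"adam\"` string above uses Keras' default learning rate of 1e-3, which is generally too high for fine-tuning a pretrained Transformer. A sketch of a gentler setup (reusing `train_dataset`, `model` and `loss` from above) is to pass an `Adam` optimizer with a small learning rate that decays to zero over training:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_epochs = 3\n",
"num_train_steps = len(train_dataset) * num_epochs  # batches per epoch times epochs\n",
"lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(\n",
"    initial_learning_rate=5e-5, end_learning_rate=0.0, decay_steps=num_train_steps\n",
")\n",
"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule), loss=loss)"
]
},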
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.fit(\n",
" train_dataset,\n",
" validation_data=validation_dataset,\n",
" epochs=3\n",
")"
]
},
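{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see how well the fine-tuned model does on MRPC, a minimal check (assuming `validation_dataset` was built with `shuffle=False`, so predictions stay aligned with the labels) is to take the argmax of the predicted logits and compare it with the true labels:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"preds = model.predict(validation_dataset)[\"logits\"]\n",
"class_preds = np.argmax(preds, axis=1)\n",
"labels = np.array(tokenized_datasets[\"validation\"][\"label\"])\n",
"print(\"Validation accuracy:\", np.mean(class_preds == labels))"
]
}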
],
"metadata": {
"colab": {
"name": "Fine-Tuning with TensorFlow",
"provenance": []
}
},
"nbformat": 4,
"nbformat_minor": 4
}