05_create_dataset/05_split_tfrecord.ipynb (592 lines of code) (raw):
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 72
},
"id": "0uUeDqA32K9o",
"outputId": "27b66765-ee49-4504-f32e-f34776c4f3b4"
},
"outputs": [
{
"data": {
"text/markdown": [
"\n",
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
" <td>\n",
" <a target=\"_blank\" href=\"https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name=Splitting+dataset+and+writing+TF+Records&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fblob%2Fmaster%2F05_create_dataset%2F05_split_tfrecord.ipynb&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fraw%2Fmaster%2F05_create_dataset%2F05_split_tfrecord.ipynb\">\n",
" <img src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png\"/> Run in AI Platform Notebook</a>\n",
"  </td>\n",
" <td>\n",
" <a target=\"_blank\" href=\"https://colab.research.google.com/github/GoogleCloudPlatform/practical-ml-vision-book/blob/master/05_create_dataset/05_split_tfrecord.ipynb\">\n",
" <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
" </td>\n",
" <td>\n",
" <a target=\"_blank\" href=\"https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/05_create_dataset/05_split_tfrecord.ipynb\">\n",
" <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
" </td>\n",
" <td>\n",
" <a href=\"https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/05_create_dataset/05_split_tfrecord.ipynb\">\n",
" <img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n",
" </td>\n",
"</table>\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from IPython.display import Markdown as md\n",
"\n",
"### change to reflect your notebook\n",
"_nb_loc = \"05_create_dataset/05_split_tfrecord.ipynb\"\n",
"_nb_title = \"Splitting dataset and writing TF Records\"\n",
"\n",
"### no need to change any of this\n",
"_nb_safeloc = _nb_loc.replace('/', '%2F')\n",
"_nb_safetitle = _nb_title.replace(' ', '+')\n",
"md(\"\"\"\n",
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
" <td>\n",
" <a target=\"_blank\" href=\"https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name={1}&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fblob%2Fmaster%2F{2}&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fraw%2Fmaster%2F{2}\">\n",
" <img src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png\"/> Run in AI Platform Notebook</a>\n",
"  </td>\n",
" <td>\n",
" <a target=\"_blank\" href=\"https://colab.research.google.com/github/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}\">\n",
" <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
" </td>\n",
" <td>\n",
" <a target=\"_blank\" href=\"https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}\">\n",
" <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
" </td>\n",
" <td>\n",
" <a href=\"https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/{0}\">\n",
" <img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n",
" </td>\n",
"</table>\n",
"\"\"\".format(_nb_loc, _nb_safetitle, _nb_safeloc))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Gksy_Cqe2PND"
},
"source": [
"# Splitting dataset and writing TF Records\n",
"\n",
"This notebook shows you how to split a dataset into training, validation, and test splits, and write those images into TensorFlow Record files.\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>image</th>\n",
" <th>label</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/100...</td>\n",
" <td>daisy</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101...</td>\n",
" <td>daisy</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101...</td>\n",
" <td>daisy</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101...</td>\n",
" <td>daisy</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101...</td>\n",
" <td>daisy</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" image label\n",
"0 gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/100... daisy\n",
"1 gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101... daisy\n",
"2 gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101... daisy\n",
"3 gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101... daisy\n",
"4 gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/101... daisy"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pandas as pd\n",
"df = pd.read_csv('gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/all_data.csv', names=['image','label'])\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3670 2930 359 381\n"
]
}
],
"source": [
"import numpy as np\n",
"np.random.seed(10)\n",
"rnd = np.random.rand(len(df))\n",
"train = df[ rnd < 0.8 ]\n",
"valid = df[ (rnd >= 0.8) & (rnd < 0.9) ]\n",
"test = df[ rnd >= 0.9 ]\n",
"print(len(df), len(train), len(valid), len(test))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"rm -rf output\n",
"mkdir output"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"train.to_csv('output/train.csv', header=False, index=False)\n",
"valid.to_csv('output/valid.csv', header=False, index=False)\n",
"test.to_csv('output/test.csv', header=False, index=False)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/10466290366_cc72e33532.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/10712722853_5632165b04.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/11642632_1e7627a2cc.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/13583238844_573df2de8e_m.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/1374193928_a52320eafa.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/13953307149_f8de6a768c_m.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/14471433500_cdaa22e3ea_m.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/14523675369_97c31d0b5b.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/163978992_8128b49d3e_n.jpg,daisy\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/16401288243_36112bd52f_m.jpg,daisy\n"
]
}
],
"source": [
"!head output/test.csv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Writing TF Records using Apache Beam\n",
"\n",
"For speed, we'll illustrate writing just 5 records"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"outdf = test.head()\n",
"len(outdf)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([['gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/10466290366_cc72e33532.jpg',\n",
" 'daisy'],\n",
" ['gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/10712722853_5632165b04.jpg',\n",
" 'daisy'],\n",
" ['gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/11642632_1e7627a2cc.jpg',\n",
" 'daisy'],\n",
" ['gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/13583238844_573df2de8e_m.jpg',\n",
" 'daisy'],\n",
" ['gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/1374193928_a52320eafa.jpg',\n",
" 'daisy']], dtype=object)"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"outdf.values"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"daisy\n",
"dandelion\n",
"roses\n",
"sunflowers\n",
"tulips\n"
]
}
],
"source": [
"!gsutil cat gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/dict.txt"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Read in 5 labels, from daisy to tulips\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"with tf.io.gfile.GFile('gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/dict.txt', 'r') as f:\n",
" LABELS = [line.rstrip() for line in f]\n",
"print('Read in {} labels, from {} to {}'.format(\n",
" len(LABELS), LABELS[0], LABELS[-1]))"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/10466290366_cc72e33532.jpg\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/10712722853_5632165b04.jpg\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/11642632_1e7627a2cc.jpg\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/13583238844_573df2de8e_m.jpg\n",
"gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/daisy/1374193928_a52320eafa.jpg\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:apache_beam.io.filebasedsink:Deleting 1 existing files in target path matching: -*-of-%(num_shards)05d\n"
]
}
],
"source": [
"import apache_beam as beam\n",
"import tensorflow as tf\n",
"\n",
"def _string_feature(value):\n",
" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode('utf-8')]))\n",
"\n",
"def _int64_feature(value):\n",
" return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n",
"\n",
"def _float_feature(value):\n",
" return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n",
"\n",
"def read_and_decode(filename):\n",
" IMG_CHANNELS = 3\n",
" img = tf.io.read_file(filename)\n",
" img = tf.image.decode_jpeg(img, channels=IMG_CHANNELS)\n",
" img = tf.image.convert_image_dtype(img, tf.float32)\n",
" return img\n",
"\n",
"def create_tfrecord(filename, label, label_int):\n",
" print(filename)\n",
" img = read_and_decode(filename)\n",
" dims = img.shape\n",
" img = tf.reshape(img, [-1]) # flatten to 1D array\n",
" return tf.train.Example(features=tf.train.Features(feature={\n",
" 'image': _float_feature(img),\n",
" 'shape': _int64_feature([dims[0], dims[1], dims[2]]),\n",
" 'label': _string_feature(label),\n",
" 'label_int': _int64_feature([label_int])\n",
" })).SerializeToString()\n",
"\n",
"with beam.Pipeline() as p:\n",
" (p \n",
" | 'input_df' >> beam.Create(outdf.values)\n",
" | 'create_tfrecord' >> beam.Map(lambda x: create_tfrecord(x[0], x[1], LABELS.index(x[1])))\n",
" | 'write' >> beam.io.tfrecordio.WriteToTFRecord('output/train')\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-rw-r--r-- 1 jupyter jupyter 8777320 Dec 25 19:58 output/train-00000-of-00001\n",
"-rw-r--r-- 1 jupyter jupyter 236472 Dec 25 19:35 output/train.csv\n"
]
}
],
"source": [
"!ls -l output/train*"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"hardcoded: train a valid False\n",
"hardcoded: train a train True\n",
"hardcoded: train b valid False\n",
"hardcoded: train b train True\n",
"hardcoded: valid c valid True\n",
"hardcoded: valid c train False\n",
"hardcoded: valid d valid True\n",
"hardcoded: valid d train False\n"
]
}
],
"source": [
"## splitting in Apache Beam\n",
"def hardcoded(x, desired_split):\n",
" split, rec = x\n",
" print('hardcoded: ', split, rec, desired_split, split == desired_split)\n",
" if split == desired_split:\n",
" yield rec\n",
"\n",
"with beam.Pipeline() as p:\n",
" splits = (p\n",
" | 'input_df' >> beam.Create([\n",
" ('train', 'a'),\n",
" ('train', 'b'),\n",
" ('valid', 'c'),\n",
" ('valid', 'd')\n",
" ]))\n",
" \n",
" split = 'train'\n",
" _ = (splits\n",
" | 'h_only_{}'.format(split) >> beam.FlatMap(\n",
" lambda x: hardcoded(x, 'train'))\n",
" ) \n",
" split = 'valid'\n",
" _ = (splits\n",
" | 'h_only_{}'.format(split) >> beam.FlatMap(\n",
" lambda x: hardcoded(x, 'valid'))\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Running on Dataflow\n",
"\n",
"Apache Beam code can be executed in a serverless way using Cloud Dataflow."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The key change is to replace `beam.Pipeline()` with:\n",
"\n",
"<pre>\n",
"options = {\n",
" 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),\n",
" 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),\n",
" 'job_name': JOBNAME,\n",
" 'project': PROJECT,\n",
" 'teardown_policy': 'TEARDOWN_ALWAYS',\n",
" 'save_main_session': True\n",
" }\n",
"opts = beam.pipeline.PipelineOptions(flags=[], **options)\n",
"with beam.Pipeline(RUNNER, options=opts) as p:\n",
"</pre>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"PROJECT=$(gcloud config get-value project)\n",
"BUCKET=${PROJECT}\n",
"\n",
"python3 -m jpeg_to_tfrecord \\\n",
" --all_data gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/all_data.csv \\\n",
" --labels_file gs://practical-ml-vision-book-data/flowers_5_jpeg/flower_photos/dict.txt \\\n",
" --project_id $PROJECT \\\n",
" --output_dir gs://${BUCKET}/data/flower_tfrecords"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<img src=\"dataflow_pipeline.png\" width=\"75%\"/>"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" 4475228 2020-12-25T22:58:29Z gs://ai-analytics-solutions/data/flower_tfrecords/test-00001-of-00099\n",
" 16718085 2020-12-25T22:58:29Z gs://ai-analytics-solutions/data/flower_tfrecords/train-00001-of-00168\n",
" 2919806 2020-12-25T22:58:29Z gs://ai-analytics-solutions/data/flower_tfrecords/valid-00001-of-00096\n",
"TOTAL: 3 objects, 24113119 bytes (23 MiB)\n"
]
}
],
"source": [
"!gsutil ls -l gs://ai-analytics-solutions/data/flower_tfrecords/*-00001-*"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "l_fNzWuY2UoB"
},
"source": [
"Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "05_split_tfrecord.ipynb",
"provenance": [],
"toc_visible": true
},
"environment": {
"name": "tf2-2-3-gpu.2-3.m59",
"type": "gcloud",
"uri": "gcr.io/deeplearning-platform-release/tf2-2-3-gpu.2-3:m59"
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}