courses/machine_learning/deepdive/05_review/labs/5_train.ipynb (38 lines):
- line 180: " # TODO: Your code goes here\n",
- line 182: " # TODO: Your code goes here\n",
- line 184: " # TODO: Your code goes here\n",
- line 186: " # TODO: Your code goes here\n",
- line 188: " # TODO: Your code goes here\n",
- line 252: "# TODO: Your code goes here\n",
- line 255: "# TODO: Your code goes here\n",
- line 269: " # TODO: Your code goes here\n",
- line 279: " # TODO: Your code goes here\n",
- line 282: " # TODO: Your code goes here\n",
- line 284: " dataset = # TODO: Your code goes here\n",
- line 292: " # TODO: Your code goes here\n",
- line 298: " # TODO: Your code goes here\n",
- line 310: " # TODO: Your code goes here"
- line 342: " --bucket= # TODO: Your code goes here\n",
- line 343: " --output_dir= # TODO: Your code goes here\n",
- line 345: " --pattern= # TODO: Your code goes here\n",
- line 346: " --train_examples= # TODO: Your code goes here\n",
- line 347: " --eval_steps= # TODO: Your code goes here"
- line 390: "gcloud ai-platform local predict # TODO: Your code goes here"
- line 423: " --region= # TODO: Your code goes here\n",
- line 424: " --module-name= # TODO: Your code goes here\n",
- line 425: " --package-path= # TODO: Your code goes here\n",
- line 426: " --job-dir= # TODO: Your code goes here\n",
- line 428: " --scale-tier= #TODO: Your code goes here\n",
- line 429: " --runtime-version= #TODO: Your code goes here\n",
- line 483: " type: # TODO: Your code goes here\n",
- line 484: " minValue: # TODO: Your code goes here\n",
- line 485: " maxValue: # TODO: Your code goes here\n",
- line 486: " scaleType: # TODO: Your code goes here\n",
- line 488: " type: # TODO: Your code goes here\n",
- line 489: " minValue: # TODO: Your code goes here\n",
- line 490: " maxValue: # TODO: Your code goes here\n",
- line 491: " scaleType: # TODO: Your code goes here\n",
- line 493: " type: # TODO: Your code goes here\n",
- line 494: " minValue: # TODO: Your code goes here\n",
- line 495: " maxValue: # TODO: Your code goes here\n",
- line 496: " scaleType: # TODO: Your code goes here"
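The input-function TODOs above (lines 269-310) follow the tf.data pattern used throughout this course. A minimal sketch, assuming TF 1.x and babyweight-style CSV columns; the column names and defaults are illustrative:

```python
import tensorflow as tf

CSV_COLUMNS = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"]
LABEL_COLUMN = "weight_pounds"
DEFAULTS = [[0.0], ["null"], [0.0], ["null"], [0.0]]

def read_dataset(pattern, mode, batch_size=512):
    def decode_csv(row):
        columns = tf.decode_csv(row, record_defaults=DEFAULTS)
        features = dict(zip(CSV_COLUMNS, columns))
        label = features.pop(LABEL_COLUMN)
        return features, label

    file_list = tf.gfile.Glob(pattern)  # expand the file pattern into paths
    dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Train indefinitely, shuffling between epochs.
        dataset = dataset.shuffle(buffer_size=10 * batch_size).repeat(None)
    else:
        dataset = dataset.repeat(1)  # a single pass for evaluation
    return dataset.batch(batch_size)
```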
courses/machine_learning/deepdive2/structured/labs/5a_train_keras_ai_platform_babyweight.ipynb (38 lines):
- line 81: "# TODO: Change these to try this notebook out\n",
- line 240: " # TODO: Add nnsize argument\n",
- line 242: " # TODO: Add nembeds argument\n",
- line 244: " # TODO: Add num_epochs argument\n",
- line 246: " # TODO: Add train_examples argument\n",
- line 248: " # TODO: Add eval_steps argument\n",
- line 299: "# TODO: Add CSV_COLUMNS and LABEL_COLUMN\n",
- line 303: "# TODO: Add DEFAULTS\n",
- line 307: " # TODO: Add your code here\n",
- line 312: " # TODO: Add your code here\n",
- line 317: " # TODO: Add your code here\n",
- line 322: " # TODO: Add your code here\n",
- line 327: " # TODO: Add your code here\n",
- line 332: " # TODO: Add your code here\n",
- line 337: " # TODO: Add your code here\n",
- line 342: " # TODO: Add your code here\n",
- line 416: " --batch_size=# TODO: Add batch size\n",
- line 417: " --num_epochs=# TODO: Add the number of epochs to train for\n",
- line 418: " --train_examples=# TODO: Add the number of examples to train each epoch for\n",
- line 419: " --eval_steps=# TODO: Add the number of evaluation batches to run"
- line 660: " --train_data_path=# TODO: Add path to training data in GCS\n",
- line 661: " --eval_data_path=# TODO: Add path to evaluation data in GCS\n",
- line 663: " --num_epochs=# TODO: Add the number of epochs to train for\n",
- line 664: " --train_examples=# TODO: Add the number of examples to train each epoch for\n",
- line 665: " --eval_steps=# TODO: Add the number of evaluation batches to run\n",
- line 666: " --batch_size=# TODO: Add batch size\n",
- line 667: " --nembeds=# TODO: Add number of embedding dimensions"
- line 701: " hyperparameterMetricTag: # TODO: Add metric we want to optimize\n",
- line 702: " goal: # TODO: MAXIMIZE or MINIMIZE?\n",
- line 708: " type: # TODO: What datatype?\n",
- line 709: " minValue: # TODO: Choose a min value\n",
- line 710: " maxValue: # TODO: Choose a max value\n",
- line 711: " scaleType: # TODO: UNIT_LINEAR_SCALE or UNIT_LOG_SCALE?\n",
- line 713: " type: # TODO: What datatype?\n",
- line 714: " minValue: # TODO: Choose a min value\n",
- line 715: " maxValue: # TODO: Choose a max value\n",
- line 716: " scaleType: # TODO: UNIT_LINEAR_SCALE or UNIT_LOG_SCALE?"
- line 739: " --# TODO: Add config for hyperparam.yaml\n",
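The argument TODOs (lines 240-248) extend the trainer's command line. A sketch using argparse, with illustrative help strings and defaults:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--nnsize",
    help="Hidden layer sizes for the DNN -- provide space-separated layers",
    nargs="+", type=int, default=[128, 32, 4])
parser.add_argument(
    "--nembeds",
    help="Embedding size for the crossed categorical features",
    type=int, default=3)
parser.add_argument(
    "--num_epochs", help="Number of epochs to train the model",
    type=int, default=10)
parser.add_argument(
    "--train_examples",
    help="Number of examples (in thousands) to run the training job over",
    type=int, default=5000)
parser.add_argument(
    "--eval_steps", help="Number of evaluation batches to run",
    type=int, default=None)
args = parser.parse_args()
```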
courses/machine_learning/deepdive2/end_to_end_ml/labs/train_keras_ai_platform_babyweight.ipynb (38 lines):
- line 90: "# TODO: Change these to try this notebook out\n",
- line 249: " # TODO: Add nnsize argument\n",
- line 251: " # TODO: Add nembeds argument\n",
- line 253: " # TODO: Add num_epochs argument\n",
- line 255: " # TODO: Add train_examples argument\n",
- line 257: " # TODO: Add eval_steps argument\n",
- line 309: "# TODO: Add CSV_COLUMNS and LABEL_COLUMN\n",
- line 313: "# TODO: Add DEFAULTS\n",
- line 317: " # TODO: Add your code here\n",
- line 322: " # TODO: Add your code here\n",
- line 327: " # TODO: Add your code here\n",
- line 332: " # TODO: Add your code here\n",
- line 337: " # TODO: Add your code here\n",
- line 342: " # TODO: Add your code here\n",
- line 347: " # TODO: Add your code here\n",
- line 352: " # TODO: Add your code here\n",
- line 426: " --batch_size=# TODO: Add batch size\n",
- line 427: " --num_epochs=# TODO: Add the number of epochs to train for\n",
- line 428: " --train_examples=# TODO: Add the number of examples to train each epoch for\n",
- line 429: " --eval_steps=# TODO: Add the number of evaluation batches to run"
- line 670: " --train_data_path=# TODO: Add path to training data in GCS\n",
- line 671: " --eval_data_path=# TODO: Add path to evaluation data in GCS\n",
- line 673: " --num_epochs=# TODO: Add the number of epochs to train for\n",
- line 674: " --train_examples=# TODO: Add the number of examples to train each epoch for\n",
- line 675: " --eval_steps=# TODO: Add the number of evaluation batches to run\n",
- line 676: " --batch_size=# TODO: Add batch size\n",
- line 677: " --nembeds=# TODO: Add number of embedding dimensions"
- line 711: " hyperparameterMetricTag: # TODO: Add metric we want to optimize\n",
- line 712: " goal: # TODO: MAXIMIZE or MINIMIZE?\n",
- line 718: " type: # TODO: What datatype?\n",
- line 719: " minValue: # TODO: Choose a min value\n",
- line 720: " maxValue: # TODO: Choose a max value\n",
- line 721: " scaleType: # TODO: UNIT_LINEAR_SCALE or UNIT_LOG_SCALE?\n",
- line 723: " type: # TODO: What datatype?\n",
- line 724: " minValue: # TODO: Choose a min value\n",
- line 725: " maxValue: # TODO: Choose a max value\n",
- line 726: " scaleType: # TODO: UNIT_LINEAR_SCALE or UNIT_LOG_SCALE?"
- line 749: " --# TODO: Add config for hyperparam.yaml\n",

courses/machine_learning/asl/05_review/labs/5_train.ipynb (38 lines):
- line 180: " # TODO: Your code goes here\n",
- line 182: " # TODO: Your code goes here\n",
- line 184: " # TODO: Your code goes here\n",
- line 186: " # TODO: Your code goes here\n",
- line 188: " # TODO: Your code goes here\n",
- line 252: "# TODO: Your code goes here\n",
- line 255: "# TODO: Your code goes here\n",
- line 269: " # TODO: Your code goes here\n",
- line 279: " # TODO: Your code goes here\n",
- line 282: " # TODO: Your code goes here\n",
- line 284: " dataset = # TODO: Your code goes here\n",
- line 292: " # TODO: Your code goes here\n",
- line 298: " # TODO: Your code goes here\n",
- line 310: " # TODO: Your code goes here"
- line 342: " --bucket= # TODO: Your code goes here\n",
- line 343: " --output_dir= # TODO: Your code goes here\n",
- line 345: " --pattern= # TODO: Your code goes here\n",
- line 346: " --train_examples= # TODO: Your code goes here\n",
- line 347: " --eval_steps= # TODO: Your code goes here"
- line 390: "gcloud ai-platform local predict # TODO: Your code goes here"
- line 423: " --region= # TODO: Your code goes here\n",
- line 424: " --module-name= # TODO: Your code goes here\n",
- line 425: " --package-path= # TODO: Your code goes here\n",
- line 426: " --job-dir= # TODO: Your code goes here\n",
- line 428: " --scale-tier= #TODO: Your code goes here\n",
- line 429: " --runtime-version= #TODO: Your code goes here\n",
- line 483: " type: # TODO: Your code goes here\n",
- line 484: " minValue: # TODO: Your code goes here\n",
- line 485: " maxValue: # TODO: Your code goes here\n",
- line 486: " scaleType: # TODO: Your code goes here\n",
- line 488: " type: # TODO: Your code goes here\n",
- line 489: " minValue: # TODO: Your code goes here\n",
- line 490: " maxValue: # TODO: Your code goes here\n",
- line 491: " scaleType: # TODO: Your code goes here\n",
- line 493: " type: # TODO: Your code goes here\n",
- line 494: " minValue: # TODO: Your code goes here\n",
- line 495: " maxValue: # TODO: Your code goes here\n",
- line 496: " scaleType: # TODO: Your code goes here"
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/1_core_tensorflow.ipynb (33 lines):
- line 131: "# TODO 1\n",
- line 132: "x.assign( # TODO: Your code goes here. \n",
- line 142: "# TODO 2\n",
- line 143: "x.assign( # TODO: Your code goes here. \n",
- line 153: "# TODO 3\n",
- line 154: "x.assign( # TODO: Your code goes here. \n",
- line 196: "# TODO 1\n",
- line 197: "a = # TODO: Your code goes here.\n",
- line 198: "b = # TODO: Your code goes here.\n",
- line 199: "c = # TODO: Your code goes here.\n",
- line 200: "d = # TODO: Your code goes here.\n",
- line 212: "# TODO 2\n",
- line 213: "a = # TODO: Your code goes here.\n",
- line 214: "b = # TODO: Your code goes here.\n",
- line 215: "c = # TODO: Your code goes here.\n",
- line 216: "d = # TODO: Your code goes here.\n",
- line 228: "# TODO 3\n",
- line 230: "a = # TODO: Your code goes here.\n",
- line 231: "b = # TODO: Your code goes here.\n",
- line 269: "# TODO 1\n",
- line 270: "# TODO: Your code goes here."
- line 297: "# TODO 1\n",
- line 298: "# TODO: Your code goes here."
- line 325: "# TODO 1\n",
- line 326: "# TODO: Your code goes here."
- line 535: "# TODO 1\n",
- line 537: " # TODO: Your code goes here."
- line 595: "# TODO 1\n",
- line 607: " dw0, dw1 = #TODO: Your code goes here.\n",
- line 608: " #TODO: Your code goes here.\n",
- line 609: " #TODO: Your code goes here.\n",
- line 612: " loss = #TODO: Your code goes here.\n",
- line 730: "# TODO 2\n",
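Lines 595-612 of 1_core_tensorflow implement a manual gradient-descent step with tf.GradientTape. A sketch assuming the lab's linear-regression setup; the loss function and toy data here are illustrative:

```python
import tensorflow as tf

w0 = tf.Variable(0.0)
w1 = tf.Variable(0.0)
LEARNING_RATE = 0.001

def loss_mse(X, Y, w0, w1):
    Y_hat = w0 * X + w1
    return tf.reduce_mean((Y_hat - Y) ** 2)

def compute_gradients(X, Y, w0, w1):
    with tf.GradientTape() as tape:
        loss = loss_mse(X, Y, w0, w1)
    return tape.gradient(loss, [w0, w1])

X = tf.constant(range(10), dtype=tf.float32)
Y = 2 * X + 10

for step in range(1000):
    dw0, dw1 = compute_gradients(X, Y, w0, w1)
    w0.assign_sub(dw0 * LEARNING_RATE)  # gradient-descent update
    w1.assign_sub(dw1 * LEARNING_RATE)
    if step % 100 == 0:
        loss = loss_mse(X, Y, w0, w1)
        print(step, loss.numpy())
```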
\n", - line 169: " columns = # TODO: Your code goes here\n", - line 170: " features = # TODO: Your code goes here\n", - line 171: " label = # TODO: Your code goes here\n", - line 175: " file_list = # TODO: Your code goes here\n", - line 178: " dataset = # TODO: Your code goes here\n", - line 181: " # TODO: Your code goes here\n", - line 184: " dataset = # TODO: Your code goes here\n", - line 216: " fc_is_male,fc_plurality,fc_mother_age,fc_gestation_weeks = [# TODO: Your code goes here]\n", - line 219: " fc_age_buckets = # TODO: Your code goes here\n", - line 220: " fc_gestation_buckets = # TODO: Your code goes here\n", - line 223: " wide = [# TODO: Your code goes here]\n", - line 226: " fc_crossed = # TODO: Your code goes here\n", - line 227: " fc_embed = # TODO: Your code goes here\n", - line 230: " deep = [# TODO: Your code goes here]\n", - line 244: "In the first TODO below, create the `feature_placeholders` dictionary by setting up the placeholders for each of the features we will use in our model. Look at the documentation for `tf.placeholder` to make sure you provide all the necessary arguments. You'll need to create placeholders for the features\n", - line 261: " feature_placeholders = # TODO: Your code goes here\n", - line 263: " features = # TODO: Your code goes here\n", - line 299: " estimator = # TODO: Your code goes here\n", - line 300: " train_spec = # TODO: Your code goes here\n", - line 301: " exporter = # TODO: Your code goes here\n", - line 302: " eval_spec = # TODO: Your code goes here\n", - line 304: " tf.estimator.train_and_evaluate(# TODO: Your code goes here)" courses/machine_learning/deepdive2/time_series_prediction/labs/4_modeling_keras.ipynb (25 lines): - line 508: "# TODO 1a\n", - line 511: "model.add( # TODO: Your code goes here.\n", - line 513: "model.compile( # TODO: Your code goes here.\n", - line 515: "history = model.fit( # TODO: Your code goes here." - line 583: "#TODO 1b\n", - line 586: "# TODO: Your code goes here." - line 644: "#TODO 1c\n", - line 648: "# TODO: Your code goes here.\n", - line 651: "# TODO: Your code goes here.\n", - line 654: "# TODO: Your code goes here." - line 711: "#TODO 2a\n", - line 715: "# TODO: Your code goes here.\n", - line 718: "# TODO: Your code goes here." - line 774: "#TODO 2b\n", - line 779: "# TODO: Your code goes here.\n", - line 782: "# TODO: Your code goes here." - line 838: "#TODO 3a\n", - line 842: "# TODO: Your code goes here.\n", - line 845: "# TODO: Your code goes here.\n", - line 848: "# TODO: Your code goes here." - line 902: "#TODO 3b\n", - line 906: "# TODO: Your code goes here.\n", - line 909: "# TODO: Your code goes here.\n", - line 912: "# TODO: Your code goes here.\n", - line 915: "# TODO: Your code goes here." courses/machine_learning/deepdive2/text_classification/labs/keras_for_text_classification.ipynb (24 lines): - line 95: " # TODO: Your code goes here.\n", - line 97: " # TODO: Your code goes here.\n", - line 99: " # TODO: Your code goes here.\n", - line 100: " # TODO: Your code goes here.\n", - line 101: " # TODO: Your code goes here.\n", - line 130: " # TODO: Your code goes here.\n", - line 135: " # TODO: Your code goes here.\n", - line 137: " # TODO: Your code goes here.\n", - line 299: "sample_title_dataset = # TODO: Your code goes here.\n", - line 300: "# TODO: Your code goes here." 
courses/machine_learning/deepdive2/time_series_prediction/labs/4_modeling_keras.ipynb (25 lines):
- line 508: "# TODO 1a\n",
- line 511: "model.add( # TODO: Your code goes here.\n",
- line 513: "model.compile( # TODO: Your code goes here.\n",
- line 515: "history = model.fit( # TODO: Your code goes here."
- line 583: "#TODO 1b\n",
- line 586: "# TODO: Your code goes here."
- line 644: "#TODO 1c\n",
- line 648: "# TODO: Your code goes here.\n",
- line 651: "# TODO: Your code goes here.\n",
- line 654: "# TODO: Your code goes here."
- line 711: "#TODO 2a\n",
- line 715: "# TODO: Your code goes here.\n",
- line 718: "# TODO: Your code goes here."
- line 774: "#TODO 2b\n",
- line 779: "# TODO: Your code goes here.\n",
- line 782: "# TODO: Your code goes here."
- line 838: "#TODO 3a\n",
- line 842: "# TODO: Your code goes here.\n",
- line 845: "# TODO: Your code goes here.\n",
- line 848: "# TODO: Your code goes here."
- line 902: "#TODO 3b\n",
- line 906: "# TODO: Your code goes here.\n",
- line 909: "# TODO: Your code goes here.\n",
- line 912: "# TODO: Your code goes here.\n",
- line 915: "# TODO: Your code goes here."

courses/machine_learning/deepdive2/text_classification/labs/keras_for_text_classification.ipynb (24 lines):
- line 95: " # TODO: Your code goes here.\n",
- line 97: " # TODO: Your code goes here.\n",
- line 99: " # TODO: Your code goes here.\n",
- line 100: " # TODO: Your code goes here.\n",
- line 101: " # TODO: Your code goes here.\n",
- line 130: " # TODO: Your code goes here.\n",
- line 135: " # TODO: Your code goes here.\n",
- line 137: " # TODO: Your code goes here.\n",
- line 299: "sample_title_dataset = # TODO: Your code goes here.\n",
- line 300: "# TODO: Your code goes here."
- line 506: "# TODO 1\n",
- line 508: " sequences = # TODO: Your code goes here.\n",
- line 509: " padded_sequences = # TODO: Your code goes here.\n",
- line 573: "# TODO 2\n",
- line 575: " classes = # TODO: Your code goes here.\n",
- line 576: " one_hots = # TODO: Your code goes here.\n",
- line 724: " # TODO: Your code goes here.\n",
- line 725: " # TODO: Your code goes here.\n",
- line 726: " # TODO: Your code goes here.\n",
- line 813: " # TODO: Your code goes here.\n",
- line 814: " # TODO: Your code goes here.\n",
- line 905: " # TODO: Your code goes here.\n",
- line 906: " # TODO: Your code goes here.\n",
- line 907: " # TODO: Your code goes here.\n",
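TODO 1 and TODO 2 of keras_for_text_classification (lines 506-576) integerize the titles and one-hot the labels. A sketch assuming a fitted Keras Tokenizer and a CLASSES lookup; the sample texts, classes, and MAX_LEN are illustrative:

```python
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical

tokenizer = Tokenizer()
tokenizer.fit_on_texts(["government shutdown looms", "new ml framework released"])
CLASSES = {"github": 0, "nytimes": 1, "techcrunch": 2}
MAX_LEN = 26

def create_sequences(texts, max_len=MAX_LEN):
    sequences = tokenizer.texts_to_sequences(texts)       # words -> integer ids
    padded_sequences = pad_sequences(sequences, max_len)  # pad/truncate to fixed length
    return padded_sequences

def encode_labels(sources):
    classes = [CLASSES[source] for source in sources]     # label string -> integer
    one_hots = to_categorical(classes)                    # integer -> one-hot vector
    return one_hots
```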
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/load_diff_filedata.ipynb (23 lines):
- line 171: "# TODO 1: Add string name for label column \n",
- line 199: "# TODO 2\n",
- line 200: "# TODO: Read the CSV data from the file and create a dataset \n",
- line 202: "# TODO: Your code goes here.\n",
- line 203: "# TODO: Your code goes here.\n",
- line 204: "# TODO: Your code goes here.\n",
- line 205: "# TODO: Your code goes here.\n",
- line 209: "raw_train_data = # TODO: Your code goes here.\n",
- line 210: "raw_test_data = # TODO: Your code goes here."
- line 822: "# TODO 1\n",
- line 823: "MEAN = # TODO: Your code goes here.\n",
- line 824: "STD = # TODO: Your code goes here."
- line 839: " # TODO 2"
- line 1119: "# TODO 1\n",
- line 1120: "preprocessing_layer = # TODO: Your code goes here."
- line 1212: "# TODO 1\n",
- line 1213: " train_examples = # TODO: Your code goes here.\n",
- line 1214: " train_labels = # TODO: Your code goes here.\n",
- line 1215: " test_examples = # TODO: Your code goes here.\n",
- line 1216: " test_labels = # TODO: Your code goes here.\n",
- line 1239: "# TODO 2\n",
- line 1240: "train_dataset = # TODO: Your code goes here.\n",
- line 1241: "test_dataset = # TODO: Your code goes here."

courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/tensors-variables.ipynb (22 lines):
- line 178: " [5, 6]], dtype= # TODO 1a\n",
- line 179: " # TODO: Your code goes here.\n",
- line 331: "# TODO 1b\n",
- line 332: "# TODO -- Your code here."
- line 359: "# TODO 1c\n",
- line 360: "# TODO -- Your code here."
- line 500: "# TODO 1d\n",
- line 502: "# TODO -- Your code here. \n",
- line 504: "# TODO -- Your code here."
- line 1020: "# TODO 2a\n",
- line 1021: "# TODO -- Your code here."
- line 1328: "# TODO 2b\n",
- line 1329: "the_f64_tensor = # TODO -- Your code here.\n",
- line 1330: "the_f16_tensor = # TODO -- Your code here.\n",
- line 1332: "the_u8_tensor = # TODO -- Your code here.\n",
- line 1654: "# TODO 2c\n",
- line 1655: "ragged_tensor = # TODO -- Your code here.\n",
- line 2077: "# TODO 2d\n",
- line 2078: "sparse_tensor = # TODO -- Your code here.\n",
- line 2137: "# TODO 3a\n",
- line 2138: "my_tensor = # TODO -- Your code here.\n",
- line 2139: "my_variable = # TODO -- Your code here.\n",
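TODOs 2b-3a of tensors-variables cover dtype casting, ragged and sparse tensors, and variables. The standard calls, with illustrative values:

```python
import tensorflow as tf

# TODO 2b: cast between dtypes.
the_f64_tensor = tf.constant([2.2, 3.3, 4.4], dtype=tf.float64)
the_f16_tensor = tf.cast(the_f64_tensor, dtype=tf.float16)
the_u8_tensor = tf.cast(the_f16_tensor, dtype=tf.uint8)  # lossy: fractions dropped

# TODO 2c: a ragged tensor holds rows of varying length.
ragged_tensor = tf.ragged.constant([[0, 1, 2, 3], [4, 5], [6, 7, 8], [9]])

# TODO 2d: a sparse tensor stores only the non-zero entries.
sparse_tensor = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])

# TODO 3a: variables are created from tensors and stay mutable.
my_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
my_variable = tf.Variable(my_tensor)
```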
\n", - line 189: " train_spec = # TODO: Your code goes here\n", - line 191: " exporter = # TODO: Your code goes here\n", - line 193: " eval_spec = # TODO: Your code goes here\n", - line 262: " # TODO: Your code goes here\n", - line 265: " # TODO: Your code goes here\n", - line 478: "credentials = # TODO: Your code goes here\n", - line 479: "api = # TODO: Your code goes here\n", - line 484: " # TODO: Your code goes here\n", - line 489: "parent = # TODO: Your code goes here\n", - line 491: "response = # TODO: Your code goes here\n", courses/machine_learning/deepdive2/building_production_ml_systems/labs/3_kubeflow_pipelines.ipynb (22 lines): - line 67: "**TODO 1**" - line 97: "HOST = # TODO: fill in the HOST information for the cluster\n", - line 98: "BUCKET = # TODO: fill in the GCS bucket" - line 112: "**TODO 2**" - line 128: "client = # TODO: create a Kubeflow client" - line 160: "exp = # TODO: create an experiment called 'taxifare'" - line 248: "**TODO 3**" - line 280: " image: # TODO: Reference the image URI for taxifare-bq2gcs you just created\n", - line 305: " image: # TODO: Reference the image URI for taxifare-trainjob you just created\n", - line 329: " image: # TODO: Reference the image URI for taxifare-deployment you just created\n", - line 359: "# TODO 3\n", - line 376: " trainjob_op = # TODO: Load the yaml file for training\n", - line 377: " trainjob = # TODO: Add your code to run the training job\n", - line 380: " deploymodel_op = # TODO: Load the yaml file for deployment\n", - line 381: " deploymodel = # TODO: Addd your code to run model deployment\n", - line 384: " # TODO: Add the code to run 'trainjob' after 'bq2gcs' in the pipeline\n", - line 385: " # TODO: Add the code to run 'deploymodel' after 'trainjob' in the pipeline" - line 401: "# TODO: Compile the pipeline functon above" - line 429: "# TODO 4\n", - line 431: " experiment_id= # TODO: Add code for experiment id\n", - line 432: " job_name= # TODO: Provide a jobname\n", - line 433: " pipeline_package_path= # TODO: Add code for pipeline zip file\n", courses/machine_learning/asl/05_review/labs/3_tensorflow_dnn.ipynb (21 lines): - line 115: "CSV_COLUMNS = # TODO: Your code goes here\n", - line 116: "LABEL_COLUMN = # TODO: Your code goes here\n", - line 119: "DEFAULTS = # TODO: Your code goes here\n", - line 141: "In the next TODO you are asked to set up the dataset depending on whether you are in `TRAIN` mode or not. (**Hint**: Use `tf.estimator.ModeKeys.TRAIN`). When in `TRAIN` mode, set the appropriate number of epochs and shuffle the data accordingly. When not in `TRAIN` mode, you will use a different number of epochs and there is no need to shuffle the data. \n", - line 161: " columns = # TODO: Your code goes here\n", - line 162: " features = # TODO: Your code goes here\n", - line 163: " label = # TODO: Your code goes here\n", - line 167: " file_list = # TODO: Your code goes here\n", - line 170: " dataset = # TODO: Your code goes here\n", - line 173: " # TODO: Your code goes here\n", - line 175: " dataset = # TODO: Your code goes here\n", - line 194: "In the first TODO below, you are asked to create a function `get_categorical` which takes a feature name and its potential values and returns an indicator `tf.feature_column` based on a categorical with vocabulary list column. 
courses/machine_learning/deepdive2/building_production_ml_systems/labs/3_kubeflow_pipelines.ipynb (22 lines):
- line 67: "**TODO 1**"
- line 97: "HOST = # TODO: fill in the HOST information for the cluster\n",
- line 98: "BUCKET = # TODO: fill in the GCS bucket"
- line 112: "**TODO 2**"
- line 128: "client = # TODO: create a Kubeflow client"
- line 160: "exp = # TODO: create an experiment called 'taxifare'"
- line 248: "**TODO 3**"
- line 280: " image: # TODO: Reference the image URI for taxifare-bq2gcs you just created\n",
- line 305: " image: # TODO: Reference the image URI for taxifare-trainjob you just created\n",
- line 329: " image: # TODO: Reference the image URI for taxifare-deployment you just created\n",
- line 359: "# TODO 3\n",
- line 376: " trainjob_op = # TODO: Load the yaml file for training\n",
- line 377: " trainjob = # TODO: Add your code to run the training job\n",
- line 380: " deploymodel_op = # TODO: Load the yaml file for deployment\n",
- line 381: " deploymodel = # TODO: Addd your code to run model deployment\n",
- line 384: " # TODO: Add the code to run 'trainjob' after 'bq2gcs' in the pipeline\n",
- line 385: " # TODO: Add the code to run 'deploymodel' after 'trainjob' in the pipeline"
- line 401: "# TODO: Compile the pipeline functon above"
- line 429: "# TODO 4\n",
- line 431: " experiment_id= # TODO: Add code for experiment id\n",
- line 432: " job_name= # TODO: Provide a jobname\n",
- line 433: " pipeline_package_path= # TODO: Add code for pipeline zip file\n",
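3_kubeflow_pipelines drives the KFP v1 SDK end to end: client, experiment, component loading, step ordering, compile, run. A sketch with illustrative host, bucket, and component file names:

```python
import kfp

HOST = "https://<your-pipelines-host>"  # illustrative
client = kfp.Client(host=HOST)          # TODO 2: the Kubeflow client
exp = client.create_experiment(name="taxifare")

@kfp.dsl.pipeline(name="Taxifare", description="Train and deploy taxifare")
def pipeline(gcs_bucket_name="<bucket>"):
    bq2gcs_op = kfp.components.load_component_from_file("bq2gcs.yaml")
    bq2gcs = bq2gcs_op(input_bucket=gcs_bucket_name)

    trainjob_op = kfp.components.load_component_from_file("trainjob.yaml")
    trainjob = trainjob_op(input_bucket=gcs_bucket_name)

    deploymodel_op = kfp.components.load_component_from_file("deploymodel.yaml")
    deploymodel = deploymodel_op(input_bucket=gcs_bucket_name)

    trainjob.after(bq2gcs)        # run training after the export step
    deploymodel.after(trainjob)   # deploy only once training finished

kfp.compiler.Compiler().compile(pipeline, "taxifare.zip")

run = client.run_pipeline(
    experiment_id=exp.id,
    job_name="taxifare_training_pipeline",
    pipeline_package_path="taxifare.zip")
```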
courses/machine_learning/asl/05_review/labs/3_tensorflow_dnn.ipynb (21 lines):
- line 115: "CSV_COLUMNS = # TODO: Your code goes here\n",
- line 116: "LABEL_COLUMN = # TODO: Your code goes here\n",
- line 119: "DEFAULTS = # TODO: Your code goes here\n",
- line 141: "In the next TODO you are asked to set up the dataset depending on whether you are in `TRAIN` mode or not. (**Hint**: Use `tf.estimator.ModeKeys.TRAIN`). When in `TRAIN` mode, set the appropriate number of epochs and shuffle the data accordingly. When not in `TRAIN` mode, you will use a different number of epochs and there is no need to shuffle the data. \n",
- line 161: " columns = # TODO: Your code goes here\n",
- line 162: " features = # TODO: Your code goes here\n",
- line 163: " label = # TODO: Your code goes here\n",
- line 167: " file_list = # TODO: Your code goes here\n",
- line 170: " dataset = # TODO: Your code goes here\n",
- line 173: " # TODO: Your code goes here\n",
- line 175: " dataset = # TODO: Your code goes here\n",
- line 194: "In the first TODO below, you are asked to create a function `get_categorical` which takes a feature name and its potential values and returns an indicator `tf.feature_column` based on a categorical with vocabulary list column. Look back at the documentation for `tf.feature_column.indicator_column` to ensure you call the arguments correctly.\n",
- line 206: " return # TODO: Your code goes here\n",
- line 210: " return # TODO: Your code goes here"
- line 239: " feature_placeholders = # TODO: Your code goes here\n",
- line 241: " features = # TODO: Your code goes here\n",
- line 277: " estimator = # TODO: Your code goes here\n",
- line 278: " train_spec = # TODO: Your code goes here\n",
- line 279: " exporter = # TODO: Your code goes here\n",
- line 280: " eval_spec = # TODO: Your code goes here\n",
- line 282: " tf.estimator.train_and_evaluate(# TODO: Your code goes here)"

courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/2_dataset_api.ipynb (21 lines):
- line 136: "# TODO 1\n",
- line 138: " dataset = # TODO -- Your code here.\n",
- line 139: " dataset = # TODO -- Your code here.\n",
- line 274: "# TODO 2\n",
- line 284: "dataset = # TODO -- Your code here.\n",
- line 286: "for step, (X_batch, Y_batch) in # TODO -- Your code here.\n",
- line 288: " dw0, dw1 = # TODO -- Your code here.\n",
- line 289: " # TODO -- Your code here.\n",
- line 292: " loss = # TODO -- Your code here.\n",
- line 408: "# TODO 3\n",
- line 410: " # TODO -- Your code here.\n",
- line 512: "# TODO 4a\n",
- line 514: " label = # TODO -- Your code here.\n",
- line 515: " features = # TODO -- Your code here.\n",
- line 517: " # TODO -- Your code here.\n",
- line 603: "# TODO 4b\n",
- line 608: " dataset = # TODO -- Your code here.\n",
- line 694: "# TODO 4c\n",
- line 699: " dataset = # TODO -- Your code here.\n",
- line 702: " dataset = # TODO -- Your code here.\n",
- line 705: " dataset = # TODO -- Your code here.\n",

courses/machine_learning/deepdive/05_review/labs/3_tensorflow_dnn.ipynb (21 lines):
- line 115: "CSV_COLUMNS = # TODO: Your code goes here\n",
- line 116: "LABEL_COLUMN = # TODO: Your code goes here\n",
- line 119: "DEFAULTS = # TODO: Your code goes here\n",
- line 141: "In the next TODO you are asked to set up the dataset depending on whether you are in `TRAIN` mode or not. (**Hint**: Use `tf.estimator.ModeKeys.TRAIN`). When in `TRAIN` mode, set the appropriate number of epochs and shuffle the data accordingly. When not in `TRAIN` mode, you will use a different number of epochs and there is no need to shuffle the data. \n",
- line 161: " columns = # TODO: Your code goes here\n",
- line 162: " features = # TODO: Your code goes here\n",
- line 163: " label = # TODO: Your code goes here\n",
- line 167: " file_list = # TODO: Your code goes here\n",
- line 170: " dataset = # TODO: Your code goes here\n",
- line 173: " # TODO: Your code goes here\n",
- line 175: " dataset = # TODO: Your code goes here\n",
- line 194: "In the first TODO below, you are asked to create a function `get_categorical` which takes a feature name and its potential values and returns an indicator `tf.feature_column` based on a categorical with vocabulary list column. Look back at the documentation for `tf.feature_column.indicator_column` to ensure you call the arguments correctly.\n",
- line 206: " return # TODO: Your code goes here\n",
- line 210: " return # TODO: Your code goes here"
- line 239: " feature_placeholders = # TODO: Your code goes here\n",
- line 241: " features = # TODO: Your code goes here\n",
- line 277: " estimator = # TODO: Your code goes here\n",
- line 278: " train_spec = # TODO: Your code goes here\n",
- line 279: " exporter = # TODO: Your code goes here\n",
- line 280: " eval_spec = # TODO: Your code goes here\n",
- line 282: " tf.estimator.train_and_evaluate(# TODO: Your code goes here)"
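The get_categorical TODO in both 3_tensorflow_dnn notebooks (lines 194-210) is a one-liner once the two feature-column calls are composed:

```python
import tensorflow as tf

def get_categorical(name, values):
    # Wrap a vocabulary-list column in an indicator (one-hot) column.
    return tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(name, values))
```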
courses/machine_learning/deepdive2/structured/labs/4c_keras_wide_and_deep_babyweight.ipynb (20 lines):
- line 140: "# TODO: Create list of string column headers, make sure order matches.\n",
- line 143: "# TODO: Add string name for label column\n",
- line 189: " # TODO: Make a CSV dataset\n",
- line 192: " # TODO: Map dataset to features and label\n",
- line 229: " # TODO: Create dictionary of tf.keras.layers.Input for each dense feature\n",
- line 232: " # TODO: Create dictionary of tf.keras.layers.Input for each sparse feature\n",
- line 263: " # TODO: Create deep feature columns for numeric features\n",
- line 266: " # TODO: Create wide feature columns for categorical features\n",
- line 269: " # TODO: Bucketize the float fields. This makes them wide\n",
- line 271: " # TODO: Cross all the wide cols, have to do the crossing before we one-hot\n",
- line 273: " # TODO: Embed cross and add to deep feature columns\n",
- line 308: " # TODO: Create DNN model for the deep side\n",
- line 311: " # TODO: Create linear model for the wide side\n",
- line 318: " # TODO: Create final output layer\n",
- line 347: " # TODO: Calculate RMSE from true and predicted labels\n",
- line 381: " # TODO: Add wide and deep feature colummns\n",
- line 393: " # TODO: Add custom eval metrics to list\n",
- line 448: "# TODO: Load training dataset\n",
- line 451: "# TODO: Load evaluation dataset\n",
- line 461: "# TODO: Fit model on training dataset and evaluate every so often\n",

courses/developingapps/v1.2/nodejs/datastore/start/server/gcp/datastore.js (18 lines):
- line 16: // TODO: Load the ../config module
- line 22: // TODO: Load the @google-cloud/datastore module
- line 28: // TODO: Create a Datastore client object, ds
- line 41: // TODO: Declare a constant named kind
- line 59: // TODO: Remove Placeholder statement
- line 66: // TODO: Declare the entity key,
- line 72: // TODO: Declare the entity object, with the key and data
- line 77: // TODO: Save the entity, return a promise
- line 89: // BONUS TODO: Remove Placeholder statement
- line 104: // BONUS TODO: Create the query
- line 116: // BONUS TODO: Execute the query
- line 124: // TODO: Return the transformed results
- line 131: // TODO: For each question returned from Datastore
- line 134: // TODO: Add in an id property using the Entity id
- line 147: // TODO: Remove the correctAnswer property
- line 152: // TODO: return the transformed item
- line 161: // TODO: Return the questions
- line 163: // TODO: Return a property to allow the client

courses/developingapps/nodejs/datastore/start/server/gcp/datastore.js (18 lines):
- line 16: // TODO: Load the ../config module
- line 22: // TODO: Load the @google-cloud/datastore module
- line 28: // TODO: Create a Datastore client object, ds
- line 38: // TODO: Declare a constant named kind
- line 50: // TODO: Remove Placeholder statement
- line 56: // TODO: Declare the entity key,
- line 63: // TODO: Declare the entity object, with the key and data
- line 68: // TODO: Save the entity, return a promise
- line 84: // BONUS TODO: Remove Placeholder statement
- line 99: // BONUS TODO: Create the query
- line 111: // BONUS TODO: Execute the query
- line 119: // TODO: Return the transformed results
- line 126: // TODO: For each question returned from Datastore
- line 129: // TODO: Add in an id property using the Entity id
- line 142: // TODO: Remove the correctAnswer property
- line 147: // TODO: return the transformed item
- line 156: // TODO: Return the questions
- line 158: // TODO: Return a property to allow the client

courses/developingapps/v1.3/nodejs/datastore/start/server/gcp/datastore.js (18 lines):
- line 16: // TODO: Load the ../config module
- line 22: // TODO: Load the @google-cloud/datastore module
- line 28: // TODO: Create a Datastore client object, ds
- line 41: // TODO: Declare a constant named kind
- line 59: // TODO: Remove Placeholder statement
- line 66: // TODO: Declare the entity key,
- line 72: // TODO: Declare the entity object, with the key and data
- line 77: // TODO: Save the entity, return a promise
- line 89: // BONUS TODO: Remove Placeholder statement
- line 104: // BONUS TODO: Create the query
- line 116: // BONUS TODO: Execute the query
- line 124: // TODO: Return the transformed results
- line 131: // TODO: For each question returned from Datastore
- line 134: // TODO: Add in an id property using the Entity id
- line 147: // TODO: Remove the correctAnswer property
- line 152: // TODO: return the transformed item
- line 161: // TODO: Return the questions
- line 163: // TODO: Return a property to allow the client

courses/developingapps/v1.2/nodejs/datastore/end/server/gcp/datastore.js (18 lines):
- line 16: // TODO: Load the ../config module
- line 22: // TODO: Load the @google-cloud/datastore module
- line 28: // TODO: Create a Datastore client object, ds
- line 43: // TODO: Declare a constant named kind
- line 61: // TODO: Remove Placeholder statement
- line 67: // TODO: Declare the entity key,
- line 74: // TODO: Declare the entity object, with the key and data
- line 97: // TODO: Save the entity, return a promise
- line 111: // BONUS TODO: Remove Placeholder statement
- line 126: // BONUS TODO: Create the query
- line 138: // BONUS TODO: Execute the query
- line 146: // TODO: Return the transformed results
- line 153: // TODO: For each question returned from Datastore
- line 156: // TODO: Add in an id property using the Entity id
- line 169: // TODO: Remove the correctAnswer property
- line 174: // TODO: return the transformed item
- line 183: // TODO: Return the questions
- line 185: // TODO: Return a property to allow the client
courses/developingapps/v1.3/nodejs/datastore/end/server/gcp/datastore.js (18 lines):
- line 16: // TODO: Load the ../config module
- line 22: // TODO: Load the @google-cloud/datastore module
- line 28: // TODO: Create a Datastore client object, ds
- line 43: // TODO: Declare a constant named kind
- line 61: // TODO: Remove Placeholder statement
- line 67: // TODO: Declare the entity key,
- line 74: // TODO: Declare the entity object, with the key and data
- line 97: // TODO: Save the entity, return a promise
- line 111: // BONUS TODO: Remove Placeholder statement
- line 126: // BONUS TODO: Create the query
- line 138: // BONUS TODO: Execute the query
- line 146: // TODO: Return the transformed results
- line 153: // TODO: For each question returned from Datastore
- line 156: // TODO: Add in an id property using the Entity id
- line 169: // TODO: Remove the correctAnswer property
- line 174: // TODO: return the transformed item
- line 183: // TODO: Return the questions
- line 185: // TODO: Return a property to allow the client
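The five datastore.js listings above repeat one flow: create a client, build a key and entity, save it, then query and transform the results. Those labs are Node.js; to keep this report's examples in a single language, here is the same flow sketched with the Python google-cloud-datastore client (the kind and property names are illustrative):

```python
from google.cloud import datastore

ds = datastore.Client()          # Datastore client object
kind = "Question"                # the entity kind

def create(question):
    key = ds.key(kind)           # incomplete key: Datastore assigns the id
    entity = datastore.Entity(key)
    entity.update(question)      # copy the question's properties onto the entity
    ds.put(entity)               # save the entity
    return entity.key

def list_questions():
    query = ds.query(kind=kind)      # build the query
    questions = []
    for entity in query.fetch():     # execute the query
        item = dict(entity)
        item["id"] = entity.key.id       # add an id property from the entity key
        item.pop("correctAnswer", None)  # don't leak the answer to the client
        questions.append(item)
    return {"questions": questions}
```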
courses/machine_learning/deepdive/05_review/labs/6_deploy.ipynb (17 lines):
- line 139: "gcloud # TODO: Your code goes here\n",
- line 140: "gcloud # TODO: Your code goes here"
- line 180: "MODEL_NAME = # TODO: Your code goes here\n",
- line 181: "MODEL_VERSION = # TODO: Your code goes here\n",
- line 183: "token = # TODO: Your code goes here\n",
- line 184: "api = # TODO: Your code goes here\n",
- line 207: " # TODO: Your code goes here\n",
- line 210: "response = # TODO: Your code goes here\n",
- line 261: "gsutil # TODO: Your code goes here\n",
- line 262: "gsutil # TODO: Your code goes here\n",
- line 264: "gcloud ai-platform # TODO: Your code goes here\n",
- line 265: " --data-format= # TODO: Your code goes here\n",
- line 266: " --region= # TODO: Your code goes here\n",
- line 267: " --input-paths= # TODO: Your code goes here\n",
- line 268: " --output-path= # TODO: Your code goes here\n",
- line 269: " --model= # TODO: Your code goes here\n",
- line 270: " --version= # TODO: Your code goes here"

courses/developingapps/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/backend/ConsoleApp.java (17 lines):
- line 47: // TODO: Create the languageService
- line 53: // TODO: Create the spannerService
- line 59: // TODO: Create the Pub/Sub subscription name
- line 65: // TODO: Create the subscriptionAdminClient
- line 69: // TODO: create the Pub/Sub subscription using the subscription name and topic
- line 84: // TODO: Extract the message data as a JSON String
- line 90: // TODO: Ack the message
- line 100: // TODO: Deserialize the JSON String representing the feedback
- line 106: // TODO: Use the Natural Language API to analyze sentiment
- line 112: // TODO: Set the feedback object sentiment score
- line 118: // TODO: Insert the feedback into Cloud Spanner
- line 130: // TODO: Declare a subscriber
- line 138: // TODO: Initialize the subscriber using its default builder
- line 145: // TODO: Add a listener to the subscriber
- line 155: // TODO: Start subscribing
- line 168: // TODO: Stop subscribing
- line 175: // TODO: Delete the subscription

courses/developingapps/v1.2/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/backend/ConsoleApp.java (17 lines):
- line 47: // TODO: Create the languageService
- line 53: // TODO: Create the spannerService
- line 59: // TODO: Create the Pub/Sub subscription name
- line 65: // TODO: Create the subscriptionAdminClient
- line 69: // TODO: create the Pub/Sub subscription using the subscription name and topic
- line 84: // TODO: Extract the message data as a JSON String
- line 90: // TODO: Ack the message
- line 100: // TODO: Deserialize the JSON String representing the feedback
- line 106: // TODO: Use the Natural Language API to analyze sentiment
- line 112: // TODO: Set the feedback object sentiment score
- line 118: // TODO: Insert the feedback into Cloud Spanner
- line 130: // TODO: Declare a subscriber
- line 138: // TODO: Initialize the subscriber using its default builder
- line 145: // TODO: Add a listener to the subscriber
- line 155: // TODO: Start subscribing
- line 168: // TODO: Stop subscribing
- line 175: // TODO: Delete the subscription

courses/developingapps/v1.3/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/backend/ConsoleApp.java (17 lines):
- line 48: // TODO: Create the languageService
- line 54: // TODO: Create the spannerService
- line 60: // TODO: Create the Pub/Sub subscription name
- line 66: // TODO: Create the subscriptionAdminClient
- line 70: // TODO: create the Pub/Sub subscription using the subscription name and topic
- line 85: // TODO: Extract the message data as a JSON String
- line 91: // TODO: Ack the message
- line 101: // TODO: Deserialize the JSON String representing the feedback
- line 107: // TODO: Use the Natural Language API to analyze sentiment
- line 113: // TODO: Set the feedback object sentiment score
- line 119: // TODO: Insert the feedback into Cloud Spanner
- line 131: // TODO: Declare a subscriber
- line 139: // TODO: Initialize the subscriber using its default builder
- line 146: // TODO: Add a listener to the subscriber
- line 156: // TODO: Start subscribing
- line 169: // TODO: Stop subscribing
- line 176: // TODO: Delete the subscription
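The three ConsoleApp.java listings walk the same worker loop: subscribe, deserialize the feedback JSON, score its sentiment, store it, then ack. Sketched here in Python for consistency with this report's other examples; the project, subscription, and field names are illustrative, and save_to_spanner is a stand-in for the Spanner insert the lab implements:

```python
import json
from google.cloud import language_v1, pubsub_v1

language = language_v1.LanguageServiceClient()
subscriber = pubsub_v1.SubscriberClient()
subscription = subscriber.subscription_path("my-project", "worker-subscription")

def save_to_spanner(feedback):
    pass  # stand-in for the Cloud Spanner insert (not shown)

def callback(message):
    feedback = json.loads(message.data)          # deserialize the JSON feedback
    document = language_v1.Document(
        content=feedback["feedback"],
        type_=language_v1.Document.Type.PLAIN_TEXT)
    sentiment = language.analyze_sentiment(document=document).document_sentiment
    feedback["score"] = sentiment.score          # attach the sentiment score
    save_to_spanner(feedback)                    # insert into Cloud Spanner
    message.ack()                                # ack only after a successful save

future = subscriber.subscribe(subscription, callback=callback)  # start subscribing
```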
courses/machine_learning/asl/05_review/labs/6_deploy.ipynb (17 lines):
- line 139: "gcloud # TODO: Your code goes here\n",
- line 140: "gcloud # TODO: Your code goes here"
- line 180: "MODEL_NAME = # TODO: Your code goes here\n",
- line 181: "MODEL_VERSION = # TODO: Your code goes here\n",
- line 183: "token = # TODO: Your code goes here\n",
- line 184: "api = # TODO: Your code goes here\n",
- line 207: " # TODO: Your code goes here\n",
- line 210: "response = # TODO: Your code goes here\n",
- line 261: "gsutil # TODO: Your code goes here\n",
- line 262: "gsutil # TODO: Your code goes here\n",
- line 264: "gcloud ai-platform # TODO: Your code goes here\n",
- line 265: " --data-format= # TODO: Your code goes here\n",
- line 266: " --region= # TODO: Your code goes here\n",
- line 267: " --input-paths= # TODO: Your code goes here\n",
- line 268: " --output-path= # TODO: Your code goes here\n",
- line 269: " --model= # TODO: Your code goes here\n",
- line 270: " --version= # TODO: Your code goes here"

quests/vertex-ai/vertex-challenge-lab/vertex-challenge-lab.ipynb (16 lines):
- line 18: "When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook.\n",
- line 157: "# TODO: Fill in the PROJECT_ID and REGION provided in the lab manual.\n",
- line 169: "# TODO: Create a globally unique Google Cloud Storage bucket for artifact storage.\n",
- line 580: " # TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict. \n",
- line 584: " # TODO: Add a trainable hub.KerasLayer for BERT text encoding using the hparams dict.\n",
- line 665: " # TODO: Save your BERT sentiment classifier locally in the form of :. \n",
- line 939: " # TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict. \n",
- line 943: " # TODO: Add a trainable hub.KerasLayer for BERT text encoding using the hparams dict.\n",
- line 1182: "# TODO: create a Docker Artifact Registry using the gcloud CLI. Note the required 'repository-format', 'location' and 'description' flags while creating the Artifact Registry.\n",
- line 1246: "# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.\n",
- line 1292: "USER = \"\" # TODO: change this to your name.\n",
- line 1344: " #TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using\n",
- line 1355: " # TODO: fill in the remaining arguments from the pipeline definition.\n",
- line 1506: "#TODO: Generate online predictions using your Vertex Endpoint. \n",
- line 1520: "#TODO: write a movie review to test your model e.g. \"The Dark Knight is the best Batman movie!\"\n",
- line 1531: "# TODO: use your Endpoint to return prediction for your 'test_review' using 'endpoint.predict()' method.\n",

courses/machine_learning/deepdive2/text_classification/labs/classify_text_with_bert.ipynb (16 lines):
- line 158: "**TODO: Set path to a folder outside the git repo where the IMDB data will be downloaded**"
- line 512: "**TODO 1: Use hub.KerasLaye to initialize the preprocessing**"
- line 529: "bert_preprocess_model = #TODO: your code goes here"
- line 545: "**TODO 2: Call the preprocess model function and pass text_test**"
- line 563: "text_preprocessed = #TODO: Code goes here\n",
- line 677: "**TODO 3: Define your model. It should contain the preprocessing model, the selected BERT model (smallBERT), a dense layer and dropout layer**\n",
- line 703: " # TODO: define your model here\n",
- line 766: "**TODO 4: define your loss and evaluation metric here. Since it is a binary classification use BinaryCrossentropy and BinaryAccuracy**"
- line 783: "loss = #TODO: your code goes here\n",
- line 784: "metrics = #TODO: your code goes here"
- line 843: "**TODO 5: complile the model using the optimizer, loss and metrics you defined above**"
- line 860: "#TODO: Model compile code goes here"
- line 876: "**TODO 6: write code to fit the model and start training**"
- line 894: "history = #TODO: model fit code goes here"
- line 1008: "**TODO 7: Write code to save the model to saved_model_path**"
- line 1028: "#TODO: your code goes here"
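Both BERT notebooks above ask for the same skeleton: a hub.KerasLayer preprocessor, a trainable hub.KerasLayer encoder, dropout, and a dense head, compiled with BinaryCrossentropy and BinaryAccuracy. A sketch assuming commonly used small-BERT handles from TF Hub (swap in the handles your lab specifies):

```python
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text  # registers the ops the preprocessing layer needs

tfhub_handle_preprocess = (
    "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
tfhub_handle_encoder = (
    "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1")

def build_classifier_model():
    text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name="text")
    preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name="preprocessing")
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name="BERT_encoder")
    outputs = encoder(encoder_inputs)
    net = outputs["pooled_output"]  # [CLS]-style sentence embedding
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(1, activation=None, name="classifier")(net)
    return tf.keras.Model(text_input, net)

model = build_classifier_model()
model.compile(
    optimizer=tf.keras.optimizers.Adam(3e-5),
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.BinaryAccuracy()])
```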
courses/machine_learning/asl/05_review/labs/2_sample_dataset.ipynb (15 lines):
- line 327: "train_query = # TODO: Your code goes here\n",
- line 328: "eval_query = # TODO: Your code goes here\n",
- line 329: "test_query = # TODO: Your code goes here\n",
- line 331: "train_df = # TODO: Your code goes here\n",
- line 332: "eval_df = # TODO: Your code goes here\n",
- line 333: "test_df = # TODO: Your code goes here\n",
- line 413: " df = # TODO: Your code goes here\n",
- line 414: " df = # TODO: Your code goes here\n",
- line 415: " df = # TODO: Your code goes here\n",
- line 416: " df = # TODO: Your code goes here\n",
- line 425: " # TODO: Your code goes here\n",
- line 426: " # TODO: Your code goes here\n",
- line 508: "# TODO: Your code goes here\n",
- line 509: "# TODO: Your code goes here\n",
- line 510: "# TODO: Your code goes here"

courses/machine_learning/deepdive/02_tensorflow/labs/d_csv_input_fn.ipynb (15 lines):
- line 85: " fields = # TODO: Your code goes here\n",
- line 86: " features = # TODO: Your code goes here\n",
- line 87: " labels = # TODO: Your code goes here\n",
- line 133: " dataset = # TODO: Your code goes here\n",
- line 134: " dataset = # TODO: Your code goes here\n",
- line 222: " dataset = # TODO: Your code goes here\n",
- line 223: " dataset = # TODO: Your code goes here\n",
- line 243: " dataset = # TODO: Your code goes here\n",
- line 244: " dataset = # TODO: Your code goes here\n",
- line 287: "feature_cols = # TODO: Your code goes here\n",
- line 324: "model = # TODO: Your code goes here"
- line 358: " input_fn = # TODO: Your code goes here,\n",
- line 359: " steps = # TODO: Your code goes here\n",
- line 387: "metrics = # TODO: Your code goes here\n",
- line 388: "print(\"RMSE on dataset = {}\".format(# TODO: Your code goes here))"

courses/machine_learning/deepdive/05_review/labs/2_sample_dataset.ipynb (15 lines):
- line 327: "train_query = # TODO: Your code goes here\n",
- line 328: "eval_query = # TODO: Your code goes here\n",
- line 329: "test_query = # TODO: Your code goes here\n",
- line 331: "train_df = # TODO: Your code goes here\n",
- line 332: "eval_df = # TODO: Your code goes here\n",
- line 333: "test_df = # TODO: Your code goes here\n",
- line 413: " df = # TODO: Your code goes here\n",
- line 414: " df = # TODO: Your code goes here\n",
- line 415: " df = # TODO: Your code goes here\n",
- line 416: " df = # TODO: Your code goes here\n",
- line 425: " # TODO: Your code goes here\n",
- line 426: " # TODO: Your code goes here\n",
- line 508: "# TODO: Your code goes here\n",
- line 509: "# TODO: Your code goes here\n",
- line 510: "# TODO: Your code goes here"

courses/machine_learning/asl/02_tensorflow/labs/d_csv_input_fn.ipynb (15 lines):
- line 85: " fields = # TODO: Your code goes here\n",
- line 86: " features = # TODO: Your code goes here\n",
- line 87: " labels = # TODO: Your code goes here\n",
- line 133: " dataset = # TODO: Your code goes here\n",
- line 134: " dataset = # TODO: Your code goes here\n",
- line 222: " dataset = # TODO: Your code goes here\n",
- line 223: " dataset = # TODO: Your code goes here\n",
- line 243: " dataset = # TODO: Your code goes here\n",
- line 244: " dataset = # TODO: Your code goes here\n",
- line 287: "feature_cols = # TODO: Your code goes here\n",
- line 324: "model = # TODO: Your code goes here"
- line 358: " input_fn = # TODO: Your code goes here,\n",
- line 359: " steps = # TODO: Your code goes here\n",
- line 387: "metrics = # TODO: Your code goes here\n",
- line 388: "print(\"RMSE on dataset = {}\".format(# TODO: Your code goes here))"
courses/machine_learning/deepdive2/structured/labs/4b_keras_dnn_babyweight.ipynb (14 lines):
- line 144: "# TODO: Create list of string column headers, make sure order matches.\n",
- line 147: "# TODO: Add string name for label column\n",
- line 193: " # TODO: Make a CSV dataset\n",
- line 196: " # TODO: Map dataset to features and label\n",
- line 233: " # TODO: Create dictionary of tf.keras.layers.Input for each raw feature\n",
- line 260: " # TODO: Create feature columns for numeric features\n",
- line 263: " # TODO: Add feature columns for categorical features\n",
- line 291: " # TODO: Create two hidden layers of [64, 32] just in like the BQML DNN\n",
- line 293: " # TODO: Create final output layer\n",
- line 322: " # TODO: Calculate RMSE from true and predicted labels\n",
- line 365: " # TODO: Add custom eval metrics to list\n",
- line 420: "# TODO: Load training dataset\n",
- line 423: "# TODO: Load evaluation dataset\n",
- line 433: "# TODO: Fit model on training dataset and evaluate every so often\n",

courses/machine_learning/asl/03_model_performance/labs/b_feature_engineering_wd.ipynb (14 lines):
- line 159: "fc_crossed_dloc = # TODO: Your code goes here\n",
- line 160: "fc_crossed_ploc = # TODO: Your code goes here\n",
- line 161: "fc_crossed_pd_pair = # TODO: Your code goes here"
- line 226: " tf.feature_column.embedding_column(categorical_column = # TODO: Your code goes here\n",
- line 227: " tf.feature_column.embedding_column(categorical_column = # TODO: Your code goes here\n",
- line 230: " # TODO: Your code goes here\n",
- line 231: " # TODO: Your code goes here\n",
- line 232: " # TODO: Your code goes here\n",
- line 233: " # TODO: Your code goes here\n",
- line 234: " # TODO: Your code goes here\n",
- line 235: " # TODO: Your code goes here\n",
- line 236: " # TODO: Your code goes here \n",
- line 308: " # TODO: Your code goes here\n",
- line 378: " # TODO: Your code goes here\n",

courses/developingapps/v1.2/nodejs/cloudstorage/end/server/gcp/cloudstorage.js (14 lines):
- line 18: // TODO: Load the module for Cloud Storage
- line 24: // TODO: Create the storage client
- line 39: // TODO: Get the GCLOUD_BUCKET environment variable
- line 49: // TODO: Get a reference to the Cloud Storage bucket
- line 77: // TODO: Get a reference to the new object
- line 83: // TODO: Create a stream to write the file into
- line 98: // TODO: Attach two event handlers (1) error
- line 102: // TODO: If there's an error move to the next handler
- line 106: // END TODO
- line 111: // TODO: Attach two event handlers (2) finish
- line 115: // TODO: Make the object publicly accessible
- line 117: // TODO: Set a new property on the file for the
- line 131: // TODO: Invoke the next middleware handler
- line 142: // TODO: End the stream to upload the file's data

courses/developingapps/java/datastore/start/src/main/java/com/google/training/appdev/services/gcp/datastore/QuestionService.java (14 lines):
- line 18: // TODO: Import the com.google.cloud.datastore.* package
- line 35: // TODO: Create a Datastore client object, datastore
- line 44: // TODO: Declare a static final String named kind
- line 54: // TODO: Create a KeyFactory for Question entities
- line 63: // TODO: Modify return type to Key
- line 69: // TODO: Declare the entity key,
- line 75: // TODO: Declare the entity object, with the key and data
- line 87: // TODO: Save the entity
- line 91: // TODO: Return the key
- line 100: // TODO: Remove this code
- line 120: // TODO: Create the query
- line 130: // TODO: Execute the query
- line 137: // TODO: Return the transformed results
- line 148: /* TODO: Uncomment this block

courses/machine_learning/deepdive/03_model_performance/labs/b_feature_engineering_wd.ipynb (14 lines):
- line 159: "fc_crossed_dloc = # TODO: Your code goes here\n",
- line 160: "fc_crossed_ploc = # TODO: Your code goes here\n",
- line 161: "fc_crossed_pd_pair = # TODO: Your code goes here"
- line 226: " tf.feature_column.embedding_column(categorical_column = # TODO: Your code goes here\n",
- line 227: " tf.feature_column.embedding_column(categorical_column = # TODO: Your code goes here\n",
- line 230: " # TODO: Your code goes here\n",
- line 231: " # TODO: Your code goes here\n",
- line 232: " # TODO: Your code goes here\n",
- line 233: " # TODO: Your code goes here\n",
- line 234: " # TODO: Your code goes here\n",
- line 235: " # TODO: Your code goes here\n",
- line 236: " # TODO: Your code goes here \n",
- line 308: " # TODO: Your code goes here\n",
- line 378: " # TODO: Your code goes here\n",
self-paced-labs/vertex-ai/vertex-challenge-lab/vertex-challenge-lab-solution.ipynb (14 lines):
- line 18: "When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook.\n",
- line 108: "# TODO: fill in PROJECT_ID.\n",
- line 526: " # TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict. \n",
- line 530: " # TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.\n",
- line 611: " # TODO: Save your BERT sentiment classifier locally. \n",
- line 883: " # TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict. \n",
- line 887: " # TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.\n",
- line 1126: "# TODO: create a Docker Artifact Registry using the gcloud CLI. Note the required 'repository-format', 'location' and 'description' flags.\n",
- line 1195: "# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.\n",
- line 1294: " #TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using\n",
- line 1305: " # TODO: fill in the remaining arguments from the pipeline definition.\n",
- line 1459: "#TODO: Generate online predictions using your Vertex Endpoint. \n",
- line 1474: "#TODO: write a movie review to test your model e.g. \"The Dark Knight is the best Batman movie!\"\n",
- line 1485: "# TODO: use your Endpoint to return prediction for your 'test_review' using 'endpoint.predict()' method.\n",

courses/developingapps/v1.3/java/datastore/start/src/main/java/com/google/training/appdev/services/gcp/datastore/QuestionService.java (14 lines):
- line 18: // TODO: Import the com.google.cloud.datastore.* package
- line 35: // TODO: Create a Datastore client object, datastore
- line 44: // TODO: Declare a static final String named kind
- line 54: // TODO: Create a KeyFactory for Question entities
- line 63: // TODO: Modify return type to Key
- line 69: // TODO: Declare the entity key,
- line 75: // TODO: Declare the entity object, with the key and data
- line 87: // TODO: Save the entity
- line 91: // TODO: Return the key
- line 100: // TODO: Remove this code
- line 120: // TODO: Create the query
- line 130: // TODO: Execute the query
- line 137: // TODO: Return the transformed results
- line 148: /* TODO: Uncomment this block

courses/developingapps/v1.2/java/datastore/start/src/main/java/com/google/training/appdev/services/gcp/datastore/QuestionService.java (14 lines):
- line 18: // TODO: Import the com.google.cloud.datastore.* package
- line 35: // TODO: Create a Datastore client object, datastore
- line 44: // TODO: Declare a static final String named kind
- line 54: // TODO: Create a KeyFactory for Question entities
- line 63: // TODO: Modify return type to Key
- line 69: // TODO: Declare the entity key,
- line 75: // TODO: Declare the entity object, with the key and data
- line 87: // TODO: Save the entity
- line 91: // TODO: Return the key
- line 100: // TODO: Remove this code
- line 120: // TODO: Create the query
- line 130: // TODO: Execute the query
- line 137: // TODO: Return the transformed results
- line 148: /* TODO: Uncomment this block
courses/developingapps/v1.3/nodejs/cloudstorage/end/server/gcp/cloudstorage.js (14 lines):
- line 18: // TODO: Load the module for Cloud Storage
- line 24: // TODO: Create the storage client
- line 39: // TODO: Get the GCLOUD_BUCKET environment variable
- line 49: // TODO: Get a reference to the Cloud Storage bucket
- line 77: // TODO: Get a reference to the new object
- line 83: // TODO: Create a stream to write the file into
- line 98: // TODO: Attach two event handlers (1) error
- line 102: // TODO: If there's an error move to the next handler
- line 106: // END TODO
- line 111: // TODO: Attach two event handlers (2) finish
- line 115: // TODO: Make the object publicly accessible
- line 117: // TODO: Set a new property on the file for the
- line 131: // TODO: Invoke the next middleware handler
- line 142: // TODO: End the stream to upload the file's data

courses/machine_learning/deepdive2/art_and_science_of_ml/labs/hyperparameter_tuning.ipynb (13 lines):
- line 752: " # TODO 1\n",
- line 753: " hp_metric = # TODO: Your code goes here\n",
- line 755: " # TODO 1\n",
- line 756: " hpt = # TODO: Your code goes here\n",
- line 757: " # TODO: Your code goes here\n",
- line 907: " maxTrials: # TODO 2: Your code goes here\n",
- line 908: " maxParallelTrials: # TODO 2: Your code goes here\n",
- line 909: " hyperparameterMetricTag: # TODO 2: Your code goes here\n",
- line 913: " # TODO 2: Your code goes here\n",
- line 915: " # TODO 2: Your code goes here\n",
- line 917: " # TODO 2: Your code goes here\n",
- line 1274: "# TODO 3\n",
- line 1276: " # TODO 3: Your code goes here\n",
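TODO 1 of hyperparameter_tuning reports each trial's metric back to the tuning service via the cloudml-hypertune package; the tag must match hyperparameterMetricTag in the YAML. A minimal sketch, assuming a Keras History object named history and an illustrative metric name:

```python
import hypertune

# Assume `history` is the return value of model.fit(...).
hp_metric = history.history["val_rmse"][-1]  # final validation RMSE of this trial

hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
    hyperparameter_metric_tag="rmse",        # must match hyperparameterMetricTag
    metric_value=hp_metric,
    global_step=len(history.history["val_rmse"]))
```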
- line 66: // TODO Log feedback saved message - line 71: // END TODO - line 73: // TODO close off the promise with a catch and log - line 81: // TODO: Register the callback with the module courses/machine_learning/asl/05_review/labs/1_explore_dataset.ipynb (13 lines): - line 140: " # TODO: Your code goes here\n", - line 147: "df = # TODO: Your code goes here\n", - line 168: " # TODO: Your code goes here\n", - line 170: " return # TODO: Your code goes here" - line 189: "df = # TODO: Your code goes here\n", - line 192: "df.plot(# TODO: Your code goes here\n", - line 193: "df.plot(# TODO: Your code goes here" - line 203: "df = # TODO: Your code goes here\n", - line 206: "df.plot(# TODO: Your code goes here\n", - line 207: "df.plot(# TODO: Your code goes here" - line 217: "df = # TODO: Your code goes here\n", - line 220: "df.plot(# TODO: Your code goes here\n", - line 221: "df.plot(# TODO: Your code goes here" courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/end/server/gcp/pubsub.js (13 lines): - line 15: // TODO: Load the Cloud Pub/Sub module - line 20: // TODO: Create a client object against Cloud Pub/Sub - line 34: // TODO: Get a reference to the feedback topic - line 45: // TODO: Publish a message to the feedback topic - line 63: // TODO: Create a subscription called worker-subscription - line 64: // TODO: Have it auto-acknowledge messages - line 68: // TODO: Trap errors where the subscription already exists - line 80: // TODO: Use the get() method on the subscription object to call - line 87: // TODO: Declare a subscription constant - line 92: // TODO: Register an event handler for message events - line 101: // TODO: Register an event handler for error events - line 111: // END TODO for the get() method promise - line 120: // END TODO for the create subscription method courses/developingapps/nodejs/cloudstorage/start/server/gcp/cloudstorage.js (13 lines): - line 17: // TODO: Load the module for Cloud Storage - line 23: // TODO: Create the storage client - line 37: // TODO: Get the GCLOUD_BUCKET environment variable - line 47: // TODO: Get a reference to the Cloud Storage bucket - line 73: // TODO: Get a reference to the new object - line 79: // TODO: Create a stream to write the file into - line 94: // TODO: Attach two event handlers (1) error - line 97: // TODO: If there's an error move to the next handler - line 107: // TODO: Attach two event handlers (2) finish - line 111: // TODO: Make the object publicly accessible - line 114: // TODO: Set a new property on the file for the - line 126: // TODO: Invoke the next middleware handler - line 136: // TODO: Upload the file's data into Cloud Storage courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/end/console/worker.js (13 lines): - line 14: // TODO: Load the ../server/gcp/pubsub module - line 19: // TODO: Load the ../server/gcp/languageapi module - line 24: // TODO: Load the ../server/gcp/spanner module - line 35: // TODO: Log the message to the console - line 41: // TODO: Invoke the languageapi module method - line 45: // TODO: Log sentiment score - line 50: // TODO: Add a score property to feedback object - line 58: // TODO: Pass on the feedback object - line 64: // TODO: Add third .then(...) 
- line 66: // TODO Log feedback saved message - line 71: // END TODO - line 73: // TODO close off the promise with a catch and log - line 81: // TODO: Register the callback with the module courses/machine_learning/deepdive2/supplemental/labs/deepconv_gan.ipynb (13 lines): - line 262: "#TODO 1\n", - line 266: " # TODO: Your code goes here.\n", - line 330: "#TODO 1.\n", - line 334: " # TODO: Your code goes here.\n", - line 449: "#TODO 2\n", - line 451: " real_loss = # TODO: Your code goes here.\n", - line 452: " fake_loss = # TODO: Your code goes here.\n", - line 453: " total_loss = # TODO: Your code goes here.\n", - line 486: "#TODO 2\n", - line 611: "# TODO 3\n", - line 617: " generated_images = # TODO: Your code goes here.\n", - line 741: "# TODO 4\n", - line 742: "# TODO: Your code goes here." courses/developingapps/v1.3/nodejs/cloudstorage/start/server/gcp/cloudstorage.js (13 lines): - line 17: // TODO: Load the module for Cloud Storage - line 23: // TODO: Create the storage client - line 37: // TODO: Get the GCLOUD_BUCKET environment variable - line 47: // TODO: Get a reference to the Cloud Storage bucket - line 73: // TODO: Get a reference to the new object - line 79: // TODO: Create a stream to write the file into - line 94: // TODO: Attach two event handlers (1) error - line 97: // TODO: If there's an error move to the next handler - line 107: // TODO: Attach two event handlers (2) finish - line 111: // TODO: Make the object publicly accessible - line 114: // TODO: Set a new property on the file for the - line 126: // TODO: Invoke the next middleware handler - line 137: // TODO: End the stream to upload the file's data courses/machine_learning/asl/03_model_performance/labs/a_feature_engineering_dnn.ipynb (13 lines): - line 152: "fc_dayofweek = # TODO: Your code goes here\n", - line 153: "fc_hourofday = # TODO: Your code goes here\n", - line 159: "fc_bucketized_plat = # TODO: Your code goes here\n", - line 160: "fc_bucketized_plon = # TODO: Your code goes here\n", - line 161: "fc_bucketized_dlat = # TODO: Your code goes here\n", - line 162: "fc_bucketized_dlon = # TODO: Your code goes here\n", - line 165: "fc_crossed_day_hr = # TODO: Your code goes here" - line 209: " features[\"latdiff\"] = # TODO: Your code goes here\n", - line 210: " features[\"londiff\"] = # TODO: Your code goes here\n", - line 211: " features[\"euclidean_dist\"] = # TODO: Your code goes here\n", - line 246: " # TODO: Your code goes here\n", - line 249: " # TODO: Your code goes here\n", - line 289: " features = # TODO: Your code goes here\n", courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/start/server/gcp/pubsub.js (13 lines): - line 15: // TODO: Load the Cloud Pub/Sub module - line 21: // TODO: Create a client object against Cloud Pub/Sub - line 35: // TODO: Get a reference to the feedback topic - line 47: // TODO: Publish a message to the feedback topic - line 67: // TODO: Create a subscription called worker-subscription - line 68: // TODO: Have it auto-acknowledge messages - line 71: // TODO: Trap errors where the subscription already exists - line 78: // TODO: Use the get() method on the subscription object to call - line 85: // TODO: Declare a subscription constant - line 90: // TODO: Register an event handler for message events - line 98: // TODO: Register an event handler for error events - line 105: // END TODO for the get() method promise - line 114: // END TODO for the create subscription method courses/machine_learning/deepdive/02_tensorflow/labs/c_estimator.ipynb (13 lines): - line 104: 
"feature_columns = # TODO: Your code goes here" - line 133: "The first TODO in the `train_input_fn` asks you to create a tf.dataset using the [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) API for input pipelines. Complete the code so that the variable `dataset` creates a tf.data.Dataset element using the [tf.from_tensor_slices method](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices). The argument `tensors` should be a tuple of a dict of the features and the label taken from the Pandas dataframe. \n", - line 135: "The second TODO in the `train_input_fn` asks you to add a shuffle, repeat and batch operation to the dataset object you created above. Have a look at [the usage of these methods in the tf.data.Datasets API](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#methods)\n", - line 137: "The next TODO is in the `eval_input_fn`. Here you are asked to create a dataset object for the validation data. It should look similar to the pipeline you created for the `train_input_fn`. Note that for the `eval_input_fn` we don't add a shuffle or repeat step as we'll just evaluation a given batch during each validation step.\n", - line 139: "The last TODO is in the `predict_input_fn` where you are asked to once again use the Tensorflow Dataset API to set up a dataset for the prediction stage using the same `from_tensor_slices` as before. Note, during `PREDICT` we don't have the label, only features. " - line 150: " dataset = # TODO: Your code goes here\n", - line 157: " dataset = # TODO: Your code goes here\n", - line 163: " dataset = # TODO: Your code goes here\n", - line 172: " dataset = # TODO: Your code goes here\n", - line 216: "# TODO: Your code goes here\n", - line 274: " # TODO: Your code goes here\n", - line 373: "|Linear Model| TODO: Your results go here |\n", - line 374: "|DNN Model|TODO: Your results go here |" courses/machine_learning/asl/02_tensorflow/labs/c_estimator.ipynb (13 lines): - line 104: "feature_columns = # TODO: Your code goes here" - line 133: "The first TODO in the `train_input_fn` asks you to create a tf.dataset using the [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) API for input pipelines. Complete the code so that the variable `dataset` creates a tf.data.Dataset element using the [tf.from_tensor_slices method](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices). The argument `tensors` should be a tuple of a dict of the features and the label taken from the Pandas dataframe. \n", - line 135: "The second TODO in the `train_input_fn` asks you to add a shuffle, repeat and batch operation to the dataset object you created above. Have a look at [the usage of these methods in the tf.data.Datasets API](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#methods)\n", - line 137: "The next TODO is in the `eval_input_fn`. Here you are asked to create a dataset object for the validation data. It should look similar to the pipeline you created for the `train_input_fn`. Note that for the `eval_input_fn` we don't add a shuffle or repeat step as we'll just evaluation a given batch during each validation step.\n", - line 139: "The last TODO is in the `predict_input_fn` where you are asked to once again use the Tensorflow Dataset API to set up a dataset for the prediction stage using the same `from_tensor_slices` as before. Note, during `PREDICT` we don't have the label, only features. 
" - line 150: " dataset = # TODO: Your code goes here\n", - line 157: " dataset = # TODO: Your code goes here\n", - line 163: " dataset = # TODO: Your code goes here\n", - line 172: " dataset = # TODO: Your code goes here\n", - line 216: "# TODO: Your code goes here\n", - line 274: " # TODO: Your code goes here\n", - line 373: "|Linear Model| TODO: Your results go here |\n", - line 374: "|DNN Model|TODO: Your results go here |" courses/machine_learning/deepdive/03_model_performance/labs/a_feature_engineering_dnn.ipynb (13 lines): - line 152: "fc_dayofweek = # TODO: Your code goes here\n", - line 153: "fc_hourofday = # TODO: Your code goes here\n", - line 159: "fc_bucketized_plat = # TODO: Your code goes here\n", - line 160: "fc_bucketized_plon = # TODO: Your code goes here\n", - line 161: "fc_bucketized_dlat = # TODO: Your code goes here\n", - line 162: "fc_bucketized_dlon = # TODO: Your code goes here\n", - line 165: "fc_crossed_day_hr = # TODO: Your code goes here" - line 209: " features[\"latdiff\"] = # TODO: Your code goes here\n", - line 210: " features[\"londiff\"] = # TODO: Your code goes here\n", - line 211: " features[\"euclidean_dist\"] = # TODO: Your code goes here\n", - line 246: " # TODO: Your code goes here\n", - line 249: " # TODO: Your code goes here\n", - line 289: " features = # TODO: Your code goes here\n", courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/start/server/gcp/pubsub.js (13 lines): - line 15: // TODO: Load the Cloud Pub/Sub module - line 21: // TODO: Create a client object against Cloud Pub/Sub - line 35: // TODO: Get a reference to the feedback topic - line 47: // TODO: Publish a message to the feedback topic - line 67: // TODO: Create a subscription called worker-subscription - line 68: // TODO: Have it auto-acknowledge messages - line 71: // TODO: Trap errors where the subscription already exists - line 78: // TODO: Use the get() method on the subscription object to call - line 85: // TODO: Declare a subscription constant - line 90: // TODO: Register an event handler for message events - line 98: // TODO: Register an event handler for error events - line 105: // END TODO for the get() method promise - line 114: // END TODO for the create subscription method courses/machine_learning/deepdive/05_review/labs/1_explore_dataset.ipynb (13 lines): - line 140: " # TODO: Your code goes here\n", - line 147: "df = # TODO: Your code goes here\n", - line 168: " # TODO: Your code goes here\n", - line 170: " return # TODO: Your code goes here" - line 189: "df = # TODO: Your code goes here\n", - line 192: "df.plot(# TODO: Your code goes here\n", - line 193: "df.plot(# TODO: Your code goes here" - line 203: "df = # TODO: Your code goes here\n", - line 206: "df.plot(# TODO: Your code goes here\n", - line 207: "df.plot(# TODO: Your code goes here" - line 217: "df = # TODO: Your code goes here\n", - line 220: "df.plot(# TODO: Your code goes here\n", - line 221: "df.plot(# TODO: Your code goes here" courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/end/server/gcp/pubsub.js (13 lines): - line 15: // TODO: Load the Cloud Pub/Sub module - line 20: // TODO: Create a client object against Cloud Pub/Sub - line 34: // TODO: Get a reference to the feedback topic - line 45: // TODO: Publish a message to the feedback topic - line 63: // TODO: Create a subscription called worker-subscription - line 64: // TODO: Have it auto-acknowledge messages - line 68: // TODO: Trap errors where the subscription already exists - line 80: // TODO: Use the get() method on the 
subscription object to call - line 87: // TODO: Declare a subscription constant - line 92: // TODO: Register an event handler for message events - line 101: // TODO: Register an event handler for error events - line 111: // END TODO for the get() method promise - line 120: // END TODO for the create subscription method courses/machine_learning/deepdive/03_model_performance/labs/d_hyperparameter_tuning.ipynb (12 lines): - line 326: " # TODO: Your code goes here\n", - line 359: " # TODO: Your code goes here\n", - line 415: " - # TODO: Your code goes here" - line 445: "!gsutil -m rm -rf # TODO: Your code goes here\n", - line 446: "!gcloud ai-platform # TODO: Your code goes here\n", - line 447: " --package-path= # TODO: Your code goes here\n", - line 448: " --module-name= # TODO: Your code goes here\n", - line 449: " --config= # TODO: Your code goes here\n", - line 450: " --job-dir= # TODO: Your code goes here\n", - line 451: " --python-version= # TODO: Your code goes here\n", - line 452: " --runtime-version= # TODO: Your code goes here\n", - line 453: " --region= # TODO: Your code goes here\n", courses/developingapps/nodejs/pubsub-languageapi-spanner/start/server/gcp/pubsub.js (12 lines): - line 15: // TODO: Load the Cloud Pub/Sub module - line 21: // TODO: Create a client object against Cloud Pub/Sub - line 35: // TODO: Get a reference to the feedback topic - line 47: // TODO: Publish a message to the feedback topic - line 66: // TODO: Create a subscription called worker-subscription - line 67: // TODO: Have it auto-acknowledge messages - line 74: // TODO: Declare a subscription constant - line 80: // TODO: Register an event handler for message event - line 81: // TODO: Use an arrow function to handle the event - line 84: // TODO: When a message arrives, invoke a callback - line 92: // TODO: Register an event handler for error event - line 95: // TODO: Print the error to the console courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/start/console/worker.js (12 lines): - line 14: // TODO: Load the ../server/gcp/pubsub module - line 20: // TODO: Load the ../server/gcp/languageapi module - line 27: // TODO: Load the ../server/gcp/spanner module - line 43: // TODO: Log the message to the console - line 48: // TODO: Invoke the languageapi module method - line 52: // TODO: Log sentiment score - line 57: // TODO: Add a score property to feedback object - line 66: // TODO: Pass on the feedback object - line 73: // TODO: Add third .then(...) 
- line 75: // TODO Log feedback saved message - line 82: // TODO close off the promise chain with a catch() and log - line 89: // TODO: Register the callback with the module courses/machine_learning/asl/03_model_performance/labs/d_hyperparameter_tuning.ipynb (12 lines): - line 326: " # TODO: Your code goes here\n", - line 359: " # TODO: Your code goes here\n", - line 415: " - # TODO: Your code goes here" - line 445: "!gsutil -m rm -rf # TODO: Your code goes here\n", - line 446: "!gcloud ai-platform # TODO: Your code goes here\n", - line 447: " --package-path= # TODO: Your code goes here\n", - line 448: " --module-name= # TODO: Your code goes here\n", - line 449: " --config= # TODO: Your code goes here\n", - line 450: " --job-dir= # TODO: Your code goes here\n", - line 451: " --python-version= # TODO: Your code goes here\n", - line 452: " --runtime-version= # TODO: Your code goes here\n", - line 453: " --region= # TODO: Your code goes here\n", courses/machine_learning/deepdive2/building_production_ml_systems/labs/2_hyperparameter_tuning.ipynb (12 lines): - line 434: " # TODO 1\n", - line 435: " hp_metric = # TODO: Your code goes here\n", - line 437: " # TODO 1\n", - line 438: " hpt = # TODO: Your code goes here\n", - line 439: " # TODO: Your code goes here\n", - line 573: " maxTrials: # TODO: Your code goes here\n", - line 574: " maxParallelTrials: # TODO: Your code goes here\n", - line 575: " hyperparameterMetricTag: # TODO: Your code goes here\n", - line 579: " # TODO: Your code goes here\n", - line 581: " # TODO: Your code goes here\n", - line 583: " # TODO: Your code goes here\n", - line 674: " # TODO: Your code goes here\n", courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/start/console/worker.js (12 lines): - line 14: // TODO: Load the ../server/gcp/pubsub module - line 20: // TODO: Load the ../server/gcp/languageapi module - line 27: // TODO: Load the ../server/gcp/spanner module - line 43: // TODO: Log the message to the console - line 48: // TODO: Invoke the languageapi module method - line 52: // TODO: Log sentiment score - line 57: // TODO: Add a score property to feedback object - line 66: // TODO: Pass on the feedback object - line 73: // TODO: Add third .then(...) 
- line 75: // TODO Log feedback saved message - line 82: // TODO close off the promise chain with a catch() and log - line 89: // TODO: Register the callback with the module courses/machine_learning/deepdive2/structured/labs/3b_bqml_linear_transform_babyweight.ipynb (11 lines): - line 140: " # TODO: Add base features and label\n", - line 142: " # TODO: Cross categorical features\n", - line 176: " # TODO: Add same features and label as training\n", - line 195: " # TODO: Select just the calculated RMSE\n", - line 200: " # TODO: Add same features and label as training\n", - line 249: " # TODO: Bucketize mother_age\n", - line 253: " # TODO: Bucketize gestation_weeks\n", - line 303: " # TODO: Add same features and label as training\n", - line 328: " # TODO: Add same features and label as training\n", - line 360: " # TODO: Add base features and label as you would in select\n", - line 361: " # TODO: Add transformed features as you would in select\n", courses/developingapps/nodejs/pubsub-languageapi-spanner/start/console/worker.js (11 lines): - line 14: // TODO: Load the ../server/gcp/pubsub module - line 20: // TODO: Load the ../server/gcp/languageapi module - line 27: // TODO: Load the ../server/gcp/spanner module - line 41: // TODO: Log the message to the console - line 48: // TODO: Invoke the languageapi module method - line 51: // TODO: Log sentiment score - line 56: // TODO: Add a score property to feedback object - line 63: // TODO: Pass on the feedback object - line 70: // TODO: Add third .then(...) - line 72: // TODO Log feedback saved message - line 81: // TODO: Register the callback with the module courses/machine_learning/deepdive/05_review/labs/4_preproc.ipynb (11 lines): - line 136: "bq = # TODO: Your code goes here\n", - line 138: "df = # TODO: Your code goes here\n", - line 207: " no_ultrasound[\"is_male\"] = # TODO: Your code goes here\n", - line 209: " no_ultrasound[\"plurality\"] = # TODO: Your code goes here\n", - line 211: " no_ultrasound[\"plurality\"] = # TODO: Your code goes here\n", - line 283: " | \"{}_read\".format(step) >> # TODO: Your code goes here\n", - line 284: " | \"{}_csv\".format(step) >> # TODO: Your code goes here\n", - line 285: " | \"{}_out\".format(step) >> # TODO: Your code goes here\n", - line 371: " # TODO Convert plurality from integers to strings\n", - line 381: " # TODO Mask is_male\n", - line 384: " # TODO Convert plurality from integers to strings and mask plurality > 1\n", courses/machine_learning/deepdive2/structured/labs/3a_bqml_baseline_babyweight.ipynb (11 lines): - line 137: " MODEL_TYPE=# TODO: Add model type,\n", - line 138: " INPUT_LABEL_COLS=[# TODO: label column name],\n", - line 142: " # TODO: Add features and label\n", - line 144: " # TODO: Add train table" - line 213: " ML.EVALUATE(MODEL # TODO: Add model name,\n", - line 216: " # TODO: Add features and label\n", - line 218: " # TODO: Add eval table\n", - line 256: " # TODO: Select just the calculated RMSE\n", - line 258: " ML.EVALUATE(MODEL # TODO: Add model name,\n", - line 261: " # TODO: Add features and label\n", - line 263: " # TODO: Add eval table\n", courses/machine_learning/asl/05_review/labs/4_preproc.ipynb (11 lines): - line 136: "bq = # TODO: Your code goes here\n", - line 138: "df = # TODO: Your code goes here\n", - line 207: " no_ultrasound[\"is_male\"] = # TODO: Your code goes here\n", - line 209: " no_ultrasound[\"plurality\"] = # TODO: Your code goes here\n", - line 211: " no_ultrasound[\"plurality\"] = # TODO: Your code goes here\n", - line 283: " | 
\"{}_read\".format(step) >> # TODO: Your code goes here\n", - line 284: " | \"{}_csv\".format(step) >> # TODO: Your code goes here\n", - line 285: " | \"{}_out\".format(step) >> # TODO: Your code goes here\n", - line 371: " # TODO Convert plurality from integers to strings\n", - line 381: " # TODO Mask is_male\n", - line 384: " # TODO Convert plurality from integers to strings and mask plurality > 1\n", courses/developingapps/v1.2/python/kubernetesengine/end/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/deepdive2/structured/labs/1b_prepare_data_babyweight.ipynb (10 lines): - line 168: "# TODO: Change environment variables\n", - line 213: "datasetexists=$(bq ls -d | grep -w # TODO: Add dataset name)\n", - line 223: " $PROJECT:# TODO: Add dataset name\n", - line 287: " # TODO: Add selected raw features and preprocessed features\n", - line 291: " # TODO: Add filters" - line 323: " # TODO: Replace is_male and plurality as indicated above\n", - line 373: " # TODO: Modulo hashmonth to be approximately 75% of the data" - line 408: " # TODO: Modulo hashmonth to be approximately 25% of the data" - line 465: "dataset_name = # TODO: Add dataset name\n", - line 472: "for step in [# TODO: Loop over train and eval]:\n", courses/developingapps/python/kubernetesengine/end/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/developingapps/python/pubsub-languageapi-spanner/start/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 47: # TODO: Acknowledge the message - line 55: # TODO: Log the message - line 63: # TODO: Use the languageapi module to analyze the sentiment - line 69: # TODO: Log the sentiment score - line 75: # TODO: Assign the sentiment score to a new score property - line 81: # TODO: Use the spanner module to save the feedback - line 85: # END TODO - line 87: # TODO: Log a message to say the feedback has been saved - line 101: # TODO: Register the callback courses/developingapps/v1.3/nodejs/cloudfunctions/end/function/index.js (10 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 28: // TODO: Decode the Cloud Pub/Sub message - line 40: // TODO: Run Natural Language API sentiment analysis - line 47: // TODO: Log the sentiment score - line 53: // TODO: Add new score property to feedbackObject - line 59: // TODO: Pass feedback object to the next handler 
- line 66: // TODO: insert record - line 68: // TODO: Log and return success - line 78: // TODO: Catch and Log error courses/developingapps/v1.3/python/kubernetesengine/start/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/developingapps/v1.2/nodejs/cloudfunctions/end/function/index.js (10 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 28: // TODO: Decode the Cloud Pub/Sub message - line 40: // TODO: Run Natural Language API sentiment analysis - line 47: // TODO: Log the sentiment score - line 53: // TODO: Add new score property to feedbackObject - line 59: // TODO: Pass feedback object to the next handler - line 66: // TODO: insert record - line 68: // TODO: Log and return success - line 78: // TODO: Catch and Log error courses/developingapps/java/appengine/start/function/index.js (10 lines): - line 1: // TODO: Load the ./languageapi module - line 7: // TODO: Load the ./spanner module - line 17: // TODO: Decode the Cloud Pub/Sub message - line 26: // TODO: Run Natural Language API sentiment analysis - line 33: // TODO: Log the sentiment score - line 39: // TODO: Add new score property to feedbackObject - line 45: // TODO: Pass feedback object to the next handler - line 50: }) // TODO: insert record - line 53: // TODO: Log and return success - line 60: }).catch(console.error); // TODO: Log error courses/developingapps/v1.2/python/kubernetesengine/start/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/deepdive2/end_to_end_ml/labs/prepare_data_babyweight.ipynb (10 lines): - line 168: "# TODO: Change environment variables\n", - line 213: "datasetexists=$(bq ls -d | grep -w # TODO: Add dataset name)\n", - line 223: " $PROJECT:# TODO: Add dataset name\n", - line 287: " # TODO: Add selected raw features and preprocessed features\n", - line 291: " # TODO: Add filters" - line 323: " # TODO: Replace is_male and plurality as indicated above\n", - line 373: " # TODO: Modulo hashmonth to be approximately 75% of the data" - line 408: " # TODO: Modulo hashmonth to be approximately 25% of the data" - line 465: "dataset_name = # TODO: Add dataset name\n", - line 472: "for step in [# TODO: Loop over train and eval]:\n", courses/developingapps/python/pubsub-languageapi-spanner/end/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the 
message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/asl/02_tensorflow/labs/e_traineval.ipynb (10 lines): - line 144: " receiver_tensors = # TODO: Your code goes here\n", - line 192: " # TODO: Your code goes here\n", - line 216: " # TODO: Your code goes here\n", - line 253: " pred_values = # TODO: Your code goes here\n", - line 255: " \"rmse\": # TODO: Your code goes here\n", - line 295: " input_fn = # TODO: Your code goes here\n", - line 296: " max_steps = # TODO: Your code goes here\n", - line 319: "exporter = # TODO: Your code goes here" - line 346: "eval_spec = # TODO: Your code goes here" - line 368: "tf.estimator.train_and_evaluate(# TODO: Your code goes here\n", courses/developingapps/python/kubernetesengine/end/backend/start/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/1_core_tensorflow.ipynb (10 lines): - line 124: "x.assign(45.8) # TODO 1\n", - line 134: "x.assign_add(4) # TODO 2\n", - line 144: "x.assign_sub(3) # TODO 3\n", - line 176: "a = tf.constant([5, 3, 8]) # TODO 1\n", - line 191: "a = tf.constant([5, 3, 8]) # TODO 2\n", - line 239: "tf.add(a_py, b_py) # TODO 1" - line 259: "tf.add(a_np, b_np) # TODO 2" - line 279: "tf.add(a_tf, b_tf) # TODO 3" - line 481: "# TODO 1\n", - line 667: "# TODO 2\n", courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules - line 44: # TODO: Acknowledge the message - line 50: # TODO: Log the message - line 59: # TODO: Use the languageapi module - line 66: # TODO: Log the sentiment score - line 72: # TODO: Assign the sentiment score to - line 79: # TODO: Use the spanner module to save the feedback - line 83: # END TODO - line 85: # TODO: Log a message to say the feedback - line 110: # TODO: Register the callbacks courses/developingapps/java/appengine/end/function/index.js (10 lines): - line 1: // TODO: Load the ./languageapi module - line 7: // TODO: Load the ./spanner module - line 17: // TODO: Decode the Cloud Pub/Sub message - line 26: // TODO: Run Natural Language API sentiment analysis - line 33: // TODO: Log the sentiment score - line 39: // TODO: Add new score property to feedbackObject - line 45: // TODO: Pass feedback object to the next handler - line 50: }) // TODO: insert record - line 53: // TODO: Log and return success - line 60: }).catch(console.error); // TODO: Log error courses/developingapps/v1.3/java/appengine/start/function/index.js (10 lines): - line 1: // TODO: Load the ./languageapi module - line 7: // TODO: Load the 
./spanner module - line 17: // TODO: Decode the Cloud Pub/Sub message - line 26: // TODO: Run Natural Language API sentiment analysis - line 33: // TODO: Log the sentiment score - line 39: // TODO: Add new score property to feedbackObject - line 45: // TODO: Pass feedback object to the next handler - line 50: }) // TODO: insert record - line 53: // TODO: Log and return success - line 60: }).catch(console.error); // TODO: Log error courses/developingapps/v1.2/java/appengine/start/function/index.js (10 lines): - line 1: // TODO: Load the ./languageapi module - line 7: // TODO: Load the ./spanner module - line 17: // TODO: Decode the Cloud Pub/Sub message - line 26: // TODO: Run Natural Language API sentiment analysis - line 33: // TODO: Log the sentiment score - line 39: // TODO: Add new score property to feedbackObject - line 45: // TODO: Pass feedback object to the next handler - line 50: }) // TODO: insert record - line 53: // TODO: Log and return success - line 60: }).catch(console.error); // TODO: Log error courses/machine_learning/deepdive2/text_classification/labs/automl_for_text_classification.ipynb (10 lines): - line 103: " # TODO: Your code goes here.\n", - line 105: " # TODO: Your code goes here.\n", - line 107: " # TODO: Your code goes here.\n", - line 108: " # TODO: Your code goes here.\n", - line 109: " # TODO: Your code goes here.\n", - line 138: " # TODO: Your code goes here.\n", - line 143: " # TODO: Your code goes here.\n", - line 145: " # TODO: Your code goes here.\n", - line 307: "sample_title_dataset = # TODO: Your code goes here.\n", - line 308: "# TODO: Your code goes here." courses/machine_learning/deepdive/05_review/labs/5_train_bqml.ipynb (10 lines): - line 267: "# TODO: Your code goes here\n", - line 321: "The cell below is missing the SQL query to examine the training statistics of our trained model. Complete the TODO below to view the results of our training job above. \n", - line 333: "# TODO: Your code goes here" - line 424: "Complete the TODO in the cell below to make predictions in BigQuery with our newly trained model `demo.babyweight_model_asis` on the `public.samples.natality` table. You'll need to preprocess the data for training by selecting only those examples which have\n", - line 456: " # TODO: Your code goes here\n", - line 664: "As in Exercise 1 above, below you are asked to complete the TODO in the cell below to train a linear regression model in BigQuery using `weight_pounds` as the label. This time, since we're using the supplemented dataset containing `without_ultrasound` data, name your model `babyweight_model_fc`. This model will reside within the `demo` dataset. \n", - line 686: "# TODO: Your code goes here\n", - line 766: "Just as in Exercise 2 above, let's plot the train and eval curve using the TRAINING_INFO from the model training job for the `babyweight_model_fc` model we trained above. Complete the TODO to create a Pandas dataframe that has the TRAINING_INFO from the training job. 
" - line 787: "df = # TODO: Your code goes here\n", - line 854: " # TODO: Your code goes here" courses/developingapps/v1.2/python/kubernetesengine/bonus/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/deepdive2/text_classification/solutions/rnn_encoder_decoder.ipynb (10 lines): - line 282: " # TODO 1a\n", - line 339: " # TODO 1b\n", - line 501: " # TODO 1c\n", - line 596: "# TODO 2a\n", - line 635: "# TODO 2b\n", - line 684: "# TODO 2c\n", - line 757: " # TODO 3a\n", - line 813: " # TODO 4: Sampling loop\n", - line 909: " # TODO 3b\n", - line 1000: " # TODO 5\n", courses/developingapps/nodejs/cloudfunctions/start/function/index.js (10 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 40: // TODO: Run Natural Language API sentiment analysis - line 47: // TODO: Log the sentiment score - line 53: // TODO: Add new score property to feedbackObject - line 59: // TODO: Pass feedback object to the next handler - line 64: // TODO: insert record - line 66: // TODO: Log and return success - line 72: // TODO: Log error courses/developingapps/v1.3/python/kubernetesengine/end/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/deepdive2/feature_engineering/labs/sdk-feature-store-pandas.ipynb (10 lines): - line 324: "# TODO 1\n", - line 327: " # TODO 1: Your code goes here\n", - line 351: "# TODO 2\n", - line 354: " # TODO 2: Your code goes here\n", - line 664: "# TODO 3\n", - line 667: " # TODO 3: Your code goes here\n", - line 709: "# TODO 4\n", - line 712: " # TODO 4: Your code goes here\n", - line 866: "# TODO 5\n", - line 867: "read_instances_df = # TODO 5: Your code goes here" courses/developingapps/v1.3/java/appengine/end/function/index.js (10 lines): - line 1: // TODO: Load the ./languageapi module - line 7: // TODO: Load the ./spanner module - line 17: // TODO: Decode the Cloud Pub/Sub message - line 26: // TODO: Run Natural Language API sentiment analysis - line 33: // TODO: Log the sentiment score - line 39: // TODO: Add new score property to feedbackObject - line 45: // TODO: Pass feedback object to the next handler - line 50: }) // TODO: insert record - line 53: // TODO: Log and return success - line 60: }).catch(console.error); // TODO: Log error courses/developingapps/python/pubsub-languageapi-spanner/bonus/quiz/console/worker.py (10 lines): - line 20: # TODO: 
Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 107: # TODO: Register the callbacks courses/developingapps/v1.3/nodejs/cloudfunctions/start/function/index.js (10 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 29: // TODO: Decode the Cloud Pub/Sub message - line 38: // TODO: Run Natural Language API sentiment analysis - line 45: // TODO: Log the sentiment score - line 51: // TODO: Add new score property to feedbackObject - line 57: // TODO: Pass feedback object to the next handler - line 63: // TODO: insert record - line 65: // TODO: Log and return success - line 73: // TODO: Catch and Log error courses/developingapps/python/kubernetesengine/start/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/machine_learning/asl/05_review/labs/5_train_bqml.ipynb (10 lines): - line 267: "# TODO: Your code goes here\n", - line 321: "The cell below is missing the SQL query to examine the training statistics of our trained model. Complete the TODO below to view the results of our training job above. \n", - line 333: "# TODO: Your code goes here" - line 424: "Complete the TODO in the cell below to make predictions in BigQuery with our newly trained model `demo.babyweight_model_asis` on the `public.samples.natality` table. You'll need to preprocess the data for training by selecting only those examples which have\n", - line 456: " # TODO: Your code goes here\n", - line 664: "As in Exercise 1 above, below you are asked to complete the TODO in the cell below to train a linear regression model in BigQuery using `weight_pounds` as the label. This time, since we're using the supplemented dataset containing `without_ultrasound` data, name your model `babyweight_model_fc`. This model will reside within the `demo` dataset. \n", - line 686: "# TODO: Your code goes here\n", - line 766: "Just as in Exercise 2 above, let's plot the train and eval curve using the TRAINING_INFO from the model training job for the `babyweight_model_fc` model we trained above. Complete the TODO to create a Pandas dataframe that has the TRAINING_INFO from the training job. 
" - line 787: "df = # TODO: Your code goes here\n", - line 854: " # TODO: Your code goes here" courses/developingapps/v1.2/nodejs/cloudfunctions/start/function/index.js (10 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 29: // TODO: Decode the Cloud Pub/Sub message - line 38: // TODO: Run Natural Language API sentiment analysis - line 45: // TODO: Log the sentiment score - line 51: // TODO: Add new score property to feedbackObject - line 57: // TODO: Pass feedback object to the next handler - line 63: // TODO: insert record - line 65: // TODO: Log and return success - line 73: // TODO: Catch and Log error courses/machine_learning/deepdive2/production_ml/labs/bqml-vertexai-model-registry.ipynb (10 lines): - line 398: "# TODO 1\n", - line 403: "# TODO 1: Your code goes here\n", - line 575: "# TODO 2\n", - line 580: "endpoint = # TODO 2: Your code goes here\n", - line 616: "# TODO 3\n", - line 620: "# TODO 3: Your code goes here\n", - line 655: "# TODO 4\n", - line 665: "prediction = # TODO 4: Your code goes here\n", - line 689: "# TODO 5\n", - line 698: "prediction_result = # TODO 5: Your code goes here" courses/machine_learning/deepdive2/text_classification/solutions/keras_for_text_classification.ipynb (10 lines): - line 476: "# TODO 1\n", - line 533: "# TODO 2\n", - line 672: " Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN]), # TODO 3\n", - line 673: " Lambda(lambda x: tf.reduce_mean(x, axis=1)), # TODO 4\n", - line 674: " Dense(N_CLASSES, activation='softmax') # TODO 5\n", - line 753: " Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN], mask_zero=True), # TODO 3\n", - line 754: " GRU(units), # TODO 5\n", - line 841: " mask_zero=True), # TODO 3\n", - line 842: " Conv1D( # TODO 5\n", - line 848: " Flatten(), # TODO 5\n", courses/developingapps/v1.3/python/kubernetesengine/bonus/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/developingapps/python/kubernetesengine/bonus/backend/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from the quiz.gcp package - line 43: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 59: # TODO: Use the languageapi module to analyze the sentiment - line 65: # TODO: Log the sentiment score - line 71: # TODO: Assign the sentiment score to a new score property - line 77: # TODO: Use the spanner module to save the feedback - line 81: # END TODO - line 83: # TODO: Log a message to say the feedback has been saved - line 97: # TODO: Register the callback courses/developingapps/v1.2/java/appengine/end/function/index.js (10 lines): - line 1: // TODO: Load the ./languageapi module - line 7: // TODO: Load the ./spanner module - line 17: // TODO: Decode the Cloud Pub/Sub message - line 26: // TODO: Run Natural Language API sentiment analysis - line 33: // TODO: Log the sentiment score - line 39: // TODO: Add new score property to feedbackObject - line 45: // TODO: Pass 
feedback object to the next handler - line 50: }) // TODO: insert record - line 53: // TODO: Log and return success - line 60: }).catch(console.error); // TODO: Log error courses/machine_learning/deepdive/02_tensorflow/labs/e_traineval.ipynb (10 lines): - line 144: " receiver_tensors = # TODO: Your code goes here\n", - line 192: " # TODO: Your code goes here\n", - line 216: " # TODO: Your code goes here\n", - line 253: " pred_values = # TODO: Your code goes here\n", - line 255: " \"rmse\": # TODO: Your code goes here\n", - line 295: " input_fn = # TODO: Your code goes here\n", - line 296: " max_steps = # TODO: Your code goes here\n", - line 319: "exporter = # TODO: Your code goes here" - line 346: "eval_spec = # TODO: Your code goes here" - line 368: "tf.estimator.train_and_evaluate(# TODO: Your code goes here\n", courses/machine_learning/deepdive/10_recommend/labs/content_based_using_neural_networks.ipynb (10 lines): - line 138: "embedded_title_column = #TODO: use a Tensorflow Hub module to create a text embeddding column for the article \"title\". \n", - line 141: "embedded_content_column = #TODO: create an embedded categorical feature column for the article id; i.e. \"content_id\".\n", - line 143: "embedded_author_column = #TODO: create an embedded categorical feature column for the article \"author\"\n", - line 145: "category_column = #TODO: create a categorical feature column for the article \"category\"\n", - line 148: "months_since_epoch_bucketized = #TODO: create a bucketized numeric feature column of values for the \"months since epoch\"\n", - line 150: "crossed_months_since_category_column = #TODO: create a crossed feature column using the \"category\" and \"months since epoch\" values\n", - line 253: " top_10_accuracy = #TODO: Compute the top_10 accuracy, using the tf.nn.in_top_k and tf.metrics.mean functions in Tensorflow\n", - line 257: " #TODO: Add top_10_accuracy to the metrics dictionary\n", - line 261: " #TODO: Add the top_10_accuracy metric to the Tensorboard summary\n", - line 348: "output = #TODO: Use the predict method on our trained model to find the predictions for the examples contained in \"first_5.csv\"." 
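The quiz/console/worker.py variants indexed above all share one pull-worker shape: receive a Pub/Sub feedback message, acknowledge it, score the text with the Language API module, attach the score as a new property, and save the record through the Spanner module, then register the callback. A minimal sketch of that callback follows, assuming the lab's quiz.gcp package exposes pubsub, languageapi and spanner helpers; the helper names used below (languageapi.analyze, spanner.save_feedback, pubsub.pull_feedback) are assumptions mirroring the TODO comments, not a confirmed API.

    import json
    import logging

    # Assumed lab-provided helpers; the real signatures live in quiz/gcp/.
    from quiz.gcp import languageapi, pubsub, spanner


    def pubsub_callback(message):
        # Acknowledge the message
        message.ack()

        feedback = json.loads(message.data)
        # Log the message
        logging.info('Message received: %s', feedback)

        # Use the languageapi module to analyze the sentiment
        score = languageapi.analyze(feedback['feedback'])
        # Log the sentiment score
        logging.info('Sentiment score: %s', score)

        # Assign the sentiment score to a new score property
        feedback['score'] = score

        # Use the spanner module to save the feedback
        spanner.save_feedback(feedback)
        # Log a message to say the feedback has been saved
        logging.info('Feedback saved')


    # Register the callback
    pubsub.pull_feedback(pubsub_callback)

The Node.js console/worker.js files listed alongside walk the same chain, but as a promise pipeline (.then(...) steps closed off with a .catch(...)) rather than a single callback body.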
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/quiz/console/worker.py (10 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules - line 44: # TODO: Acknowledge the message - line 50: # TODO: Log the message - line 59: # TODO: Use the languageapi module - line 66: # TODO: Log the sentiment score - line 72: # TODO: Assign the sentiment score to - line 79: # TODO: Use the spanner module to save the feedback - line 83: # END TODO - line 85: # TODO: Log a message to say the feedback - line 110: # TODO: Register the callbacks courses/developingapps/v1.2/python/appengine/start/function/index.js (9 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 42: // TODO: Log the sentiment score - line 48: // TODO: Add new score property to feedbackObject - line 54: // TODO: Pass feedback object to the next handler - line 59: }) // TODO: insert record - line 62: // TODO: Log and return success - line 69: }).catch(console.error); // TODO: Log error courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/end/server/gcp/languageapi.js (9 lines): - line 17: // TODO: Load the Natural Language ML API module - line 22: // TODO: Create a client object against the Language API - line 38: // TODO: Create an object named document with the - line 40: // TODO: Initialize object content & type properties - line 41: // TODO: Set content from text arg - line 42: // TODO: Set type to PLAIN_TEXT - line 50: // TODO: Perform sentiment detection - line 52: // TODO: Chain then - line 57: // TODO: Get the sentiment score (-1 to +1) courses/developingapps/v1.3/python/appengine/start/function/index.js (9 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 42: // TODO: Log the sentiment score - line 48: // TODO: Add new score property to feedbackObject - line 54: // TODO: Pass feedback object to the next handler - line 59: }) // TODO: insert record - line 62: // TODO: Log and return success - line 69: }).catch(console.error); // TODO: Log error courses/machine_learning/deepdive2/building_production_ml_systems/labs/4b_streaming_data_inference_vertex.ipynb (9 lines): - line 203: "**TODO:** Open the file ./taxicab_traffic/streaming_count.py and find the TODO there. Specify a sliding window that is 5 minutes long, and gets recalculated every 15 seconds. Hint: Reference the [beam programming guide](https://beam.apache.org/documentation/programming-guide/#windowing) for guidance. To check your answer reference the solution. \n", - line 288: "# TODO 2a. Write a function to take most recent entry in `traffic_realtime` table and add it to instance.\n", - line 292: " TODO: Your code goes here\n", - line 295: " instance['traffic_last_5min'] = # TODO: Your code goes here.\n", - line 350: "# TODO 2b. 
Write code to call prediction on instance using realtime traffic info.\n", - line 353: "ENDPOINT_ID = # TODO: Copy the `ENDPOINT_ID` from the deployment in the previous lab.\n", - line 371: "instance_dict = # TODO: Your code goes here.\n", - line 378: "response = # TODO: Your code goes here.\n", - line 381: "print(\" prediction:\", # TODO: Your code goes here.\n" courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/start/server/gcp/languageapi.js (9 lines): - line 17: // TODO: Load the Natural Language ML API module - line 24: // TODO: Create a client object against the Language API - line 41: // TODO: Create an object named document with the - line 45: // TODO: Initialize object content and type props - line 46: // TODO: Set content from text arg - line 47: // TODO: Set type to PLAIN_TEXT - line 52: // TODO: Perform sentiment detection - line 54: // TODO: Chain then - line 59: // TODO: Get the sentiment score (-1 to +1) courses/developingapps/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/pubsub/PublishService.java (9 lines): - line 41: // TODO: Declare and initialize two Strings, PROJECT_ID and TOPIC_NAME - line 52: // TODO: Create a TopicName object for the feedback topic in the project - line 58: // TODO: Declare a publisher for the topic - line 69: // TODO: Initialize the publisher using a builder and the topicName - line 75: // TODO: Copy the serialized message to a byte string - line 81: // TODO: Create a Pub/Sub message using a builder; set the message data - line 87: // TODO: Publish the message, assign to the messageIdFuture - line 95: // TODO: Get the messageId from the messageIdFuture - line 103: // TODO: Shutdown the publisher to free up resources courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/start/server/gcp/spanner.js (9 lines): - line 15: // TODO: Import the @google-cloud/spanner module - line 21: // TODO: Create a client object to access Cloud Spanner - line 36: // TODO: Get a reference to the Cloud Spanner instance - line 42: // TODO: Get a reference to the Cloud Spanner database - line 48: // TODO: Get a reference to the Cloud Spanner table - line 59: // TODO: Declare rev_email constant - line 60: // TODO: Produce a 'reversed' email address - line 67: // TODO: Create record object to be inserted into Spanner - line 76: // TODO: Insert the record into the table using await courses/machine_learning/deepdive2/art_and_science_of_ml/solutions/hyperparameter_tuning.ipynb (9 lines): - line 760: " # TODO 1\n", - line 763: " # TODO 1\n", - line 909: " maxTrials: 10 # TODO 2\n", - line 910: " maxParallelTrials: 2 # TODO 2\n", - line 911: " hyperparameterMetricTag: rmse # TODO 2\n", - line 915: " # TODO 2\n", - line 921: " # TODO 2\n", - line 927: " # TODO 2\n", - line 1291: "# TODO 3\n", courses/developingapps/v1.2/python/appengine/end/function/index.js (9 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 42: // TODO: Log the sentiment score - line 48: // TODO: Add new score property to feedbackObject - line 54: // TODO: Pass feedback object to the next handler - line 59: }) // TODO: insert record - line 62: // TODO: Log and return success - line 69: }).catch(console.error); // TODO: Log error courses/developingapps/python/datastore/start/quiz/gcp/datastore.py (9 lines): - line 15: # TODO: Import the os module - line 21: # TODO: Get the GCLOUD_PROJECT environment variable - line 29: # TODO: Import 
the datastore module from the google.cloud package - line 35: # TODO: Create a Cloud Datastore client object - line 62: # TODO: Create a key for a Datastore entity whose kind is Question - line 68: # TODO: Create a Datastore entity object using the key - line 74: # TODO: Iterate over the form values supplied to the function - line 80: # TODO: Assign each key and value to the Datastore entity - line 87: # TODO: Save the entity courses/developingapps/v1.2/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/pubsub/PublishService.java (9 lines): - line 41: // TODO: Declare and initialize two Strings, PROJECT_ID and TOPIC_NAME - line 52: // TODO: Create a TopicName object for the feedback topic in the project - line 58: // TODO: Declare a publisher for the topic - line 69: // TODO: Initialize the publisher using a builder and the topicName - line 75: // TODO: Copy the serialized message to a byte string - line 81: // TODO: Create a Pub/Sub message using a builder; set the message data - line 87: // TODO: Publish the message, assign to the messageIdFuture - line 95: // TODO: Get the messageId from the messageIdFuture - line 103: // TODO: Shutdown the publisher to free up resources courses/developingapps/nodejs/pubsub-languageapi-spanner/start/server/gcp/languageapi.js (9 lines): - line 15: // TODO: Load the Natural Language ML API module - line 22: // TODO: Create a client object against the Language API - line 38: // TODO: Create an object named document with the - line 42: // TODO: Initialize object content and type props - line 43: // TODO: Set content from text arg - line 44: // TODO: Set type to PLAIN_TEXT - line 49: // TODO: Perform sentiment detection - line 51: // TODO: Chain then - line 56: // TODO: Get the sentiment score (-1 to +1) courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/start/server/gcp/languageapi.js (9 lines): - line 17: // TODO: Load the Natural Language ML API module - line 24: // TODO: Create a client object against the Language API - line 41: // TODO: Create an object named document with the - line 45: // TODO: Initialize object content and type props - line 46: // TODO: Set content from text arg - line 47: // TODO: Set type to PLAIN_TEXT - line 52: // TODO: Perform sentiment detection - line 54: // TODO: Chain then - line 59: // TODO: Get the sentiment score (-1 to +1) courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/start/server/gcp/spanner.js (9 lines): - line 15: // TODO: Import the @google-cloud/spanner module - line 21: // TODO: Create a client object to access Cloud Spanner - line 36: // TODO: Get a reference to the Cloud Spanner instance - line 42: // TODO: Get a reference to the Cloud Spanner database - line 48: // TODO: Get a reference to the Cloud Spanner table - line 59: // TODO: Declare rev_email constant - line 60: // TODO: Produce a 'reversed' email address - line 67: // TODO: Create record object to be inserted into Spanner - line 76: // TODO: Insert the record into the table using await courses/machine_learning/deepdive2/art_and_science_of_ml/labs/distributed_training.ipynb (9 lines): - line 94: "# TODO 1\n", - line 221: "# TODO 2\n", - line 222: "# TODO -- Your code here.\n", - line 372: "# TODO 3a\n", - line 373: "# TODO -- Your code here.\n" - line 458: "# TODO 3b\n", - line 459: "# TODO -- Your code here.\n" - line 544: "# TODO 3c\n", - line 545: "# TODO -- Your code here.\n" courses/developingapps/v1.2/python/pubsub-languageapi-spanner/end/quiz/console/worker.py (9 lines): - line 
20: # TODO: Load the pubsub, languageapi and spanner modules from - line 45: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 60: # TODO: Use the languageapi module to - line 67: # TODO: Log the sentiment score - line 73: # TODO: Assign the sentiment score to - line 80: # TODO: Use the spanner module to save the feedback - line 86: # TODO: Log a message to say the feedback - line 101: # TODO: Register the callback courses/machine_learning/deepdive/02_tensorflow/labs/g_distributed.ipynb (9 lines): - line 79: "!gsutil -m rm -rf # TODO: Your code goes here\n", - line 80: "!gcloud ai-platform # TODO: Your code goes here\n", - line 81: " --package-path= # TODO: Your code goes here\n", - line 82: " --module-name= # TODO: Your code goes here\n", - line 83: " --job-dir= # TODO: Your code goes here\n", - line 84: " --python-version= # TODO: Your code goes here\n", - line 85: " --runtime-version= # TODO: Your code goes here\n", - line 86: " --region= # TODO: Your code goes here\n", - line 87: " --scale-tier= # TODO: Your code goes here\n", courses/developingapps/v1.3/python/pubsub-languageapi-spanner/end/quiz/console/worker.py (9 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from - line 45: # TODO: Acknowledge the message - line 51: # TODO: Log the message - line 60: # TODO: Use the languageapi module to - line 67: # TODO: Log the sentiment score - line 73: # TODO: Assign the sentiment score to - line 80: # TODO: Use the spanner module to save the feedback - line 86: # TODO: Log a message to say the feedback - line 101: # TODO: Register the callback courses/machine_learning/deepdive2/launching_into_ml/labs/automl_text_classification.ipynb (9 lines): - line 215: "TIMESTAMP = # TODO: Your code goes here\n", - line 248: "ds = # TODO: Your code goes here(\n", - line 288: "datasets = # TODO: Your code goes here(filter=f'display_name=\"{display_name}\"')\n", - line 362: "job = # TODO: Your code goes here(\n", - line 504: "model = # TODO: Your code goes here(\n", - line 546: "models = # TODO: Your code goes here(filter=f'display_name=\"{model_display_name}\"')\n", - line 1008: "endpoint = # TODO: Your code goes here(\n", - line 1110: "response = # TODO: Your code goes here(instances=[{\"content\": content}])\n", - line 1285: "batch_prediction_job = # TODO: Your code goes here(\n", quests/getting_started_apache_beam/beam_ml_toxicity_in_gaming/exercises/part1.py (9 lines): - line 63: # TODO: Follow Step 3: Create the pipeline to read from the input topic - line 64: # TODO: Follow Step 4: Window the incoming element - line 65: # TODO: Follow Step 5: Tag your element with the key - line 70: # TODO: Follow Step 6: Create the model handler - line 75: # TODO: Follow Step 7: Submit the input to the model for a result - line 78: # TODO: Apply the correct DoFn from above as instructed in Step 8: Parse your results from the prediction - line 85: # TODO: Follow Step 9: Do a simple MAP and print - line 89: # TODO: Follow Step 10: Filter your data on the result key - line 92: # TODO: Follow Step 11: Submit the messages to Pub/Sub for further action courses/machine_learning/deepdive2/production_ml/labs/distributed_training.ipynb (9 lines): - line 94: "# TODO 1\n", - line 221: "# TODO 2\n", - line 222: "# TODO -- Your code here.\n", - line 365: "# TODO 3a\n", - line 366: "# TODO -- Your code here.\n" - line 451: "# TODO 3b\n", - line 452: "# TODO -- Your code here.\n" - line 537: "# TODO 3c\n", - line 538: "# TODO -- Your code here.\n" 
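The worker-console TODOs indexed above for worker.py trace one complete pipeline: pull a Pub/Sub message, acknowledge and log it, score the feedback text with the Natural Language helper, attach the score, and save the record through the Spanner helper. A minimal Python sketch of one possible completion follows; the quiz.gcp helper modules come from the lab scaffold, but the helper names used here (analyze, save_feedback, pull_feedback) and the message fields are illustrative assumptions, not the lab's required API.

    import json
    import logging

    # Lab-scaffold helper modules (names assumed from the course layout)
    from quiz.gcp import pubsub, languageapi, spanner

    def pubsub_callback(message):
        # Acknowledge so Pub/Sub does not redeliver the message
        message.ack()
        data = json.loads(message.data.decode('utf-8'))
        logging.info('Message received: %s', data)

        # Score the feedback text; sentiment is in the range [-1, +1]
        score = languageapi.analyze(data['feedback'])
        logging.info('Sentiment score: %s', score)

        # Attach the score and persist the record via the Spanner helper
        data['score'] = score
        spanner.save_feedback(data)
        logging.info('Feedback saved')

    # Register the callback against the worker subscription
    pubsub.pull_feedback(pubsub_callback)
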
courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/end/server/gcp/languageapi.js (9 lines): - line 17: // TODO: Load the Natural Language ML API module - line 22: // TODO: Create a client object against the Language API - line 38: // TODO: Create an object named document with the - line 40: // TODO: Initialize object content & type properties - line 41: // TODO: Set content from text arg - line 42: // TODO: Set type to PLAIN_TEXT - line 50: // TODO: Perform sentiment detection - line 52: // TODO: Chain then - line 57: // TODO: Get the sentiment score (-1 to +1) courses/developingapps/python/appengine/start/function/index.js (9 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 42: // TODO: Log the sentiment score - line 48: // TODO: Add new score property to feedbackObject - line 54: // TODO: Pass feedback object to the next handler - line 59: }) // TODO: insert record - line 62: // TODO: Log and return success - line 69: }).catch(console.error); // TODO: Log error courses/machine_learning/asl/02_tensorflow/labs/g_distributed.ipynb (9 lines): - line 79: "!gsutil -m rm -rf # TODO: Your code goes here\n", - line 80: "!gcloud ai-platform # TODO: Your code goes here\n", - line 81: " --package-path= # TODO: Your code goes here\n", - line 82: " --module-name= # TODO: Your code goes here\n", - line 83: " --job-dir= # TODO: Your code goes here\n", - line 84: " --python-version= # TODO: Your code goes here\n", - line 85: " --runtime-version= # TODO: Your code goes here\n", - line 86: " --region= # TODO: Your code goes here\n", - line 87: " --scale-tier= # TODO: Your code goes here\n", courses/developingapps/nodejs/pubsub-languageapi-spanner/start/server/gcp/spanner.js (9 lines): - line 15: // TODO: Import the @google-cloud/spanner module - line 21: // TODO: Create a client object to access Cloud Spanner - line 36: // TODO: Get a reference to the Cloud Spanner instance - line 42: // TODO: Get a reference to the Cloud Spanner database - line 48: // TODO: Get a reference to the Cloud Spanner table - line 58: // TODO: Declare rev_email constant - line 59: // TODO: Produce a 'reversed' email address - line 66: // TODO: Create record object to be inserted into Spanner - line 74: // TODO: Insert the record into the table courses/developingapps/v1.3/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/pubsub/PublishService.java (9 lines): - line 41: // TODO: Declare and initialize two Strings, PROJECT_ID and TOPIC_NAME - line 52: // TODO: Create a TopicName object for the feedback topic in the project - line 58: // TODO: Declare a publisher for the topic - line 69: // TODO: Initialize the publisher using a builder and the topicName - line 75: // TODO: Copy the serialized message to a byte string - line 81: // TODO: Create a Pub/Sub message using a builder; set the message data - line 87: // TODO: Publish the message, assign to the messageIdFuture - line 95: // TODO: Get the messageId from the messageIdFuture - line 103: // TODO: Shutdown the publisher to free up resources courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/original_1_training_at_scale_vertex.ipynb (9 lines): - line 468: "# TODO 1: Your code goes here\n", - line 471: "# TODO 2: Your code goes here\n", - line 806: " # TODO 1a\n", - line 814: " # TODO 1b\n", - line 1076: "# TODO 3: Your code goes here" - line 1211: "# TODO 4: Your code goes here" - line 1312: "# TODO 
5: Your code goes here" - line 1477: "# TODO 3\n", - line 1521: "TODO: To submit to the Cloud we use [`gcloud ai custom-jobs create`](https://cloud.google.com/sdk/gcloud/reference/ai/custom-jobs/create) and simply specify some additional parameters for Vertex AI Training Service:\n", courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/end/server/gcp/spanner.js (9 lines): - line 15: // TODO: Import the @google-cloud/spanner module - line 21: // TODO: Create a client object to access Cloud Spanner - line 35: // TODO: Get a reference to the Cloud Spanner instance - line 40: // TODO: Get a reference to the Cloud Spanner database - line 45: // TODO: Get a reference to the Cloud Spanner table - line 53: // TODO: Declare rev_email constant - line 54: // TODO: Produce a 'reversed' email address - line 64: // TODO: Create record object to be inserted into Spanner - line 77: // TODO: Insert the record into the table using await courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/end/server/gcp/spanner.js (9 lines): - line 15: // TODO: Import the @google-cloud/spanner module - line 21: // TODO: Create a client object to access Cloud Spanner - line 35: // TODO: Get a reference to the Cloud Spanner instance - line 40: // TODO: Get a reference to the Cloud Spanner database - line 45: // TODO: Get a reference to the Cloud Spanner table - line 53: // TODO: Declare rev_email constant - line 54: // TODO: Produce a 'reversed' email address - line 64: // TODO: Create record object to be inserted into Spanner - line 77: // TODO: Insert the record into the table using await quests/serverlessml/04_keras/labs/keras_dnn.ipynb (9 lines): - line 145: "# TODO 1: Specify the LABEL_COLUMN name you are predicting for below:\n", - line 172: " # TODO 1: Complete the four tf.data.experimental.make_csv_dataset options\n", - line 203: " # TODO 2: Specify the five input columns\n", - line 221: " # TODO 2: Create two hidden layers [32,8] with relu activation. 
Name them h1 and h2\n", - line 227: " # TODO 2: Create an output layer with linear activation and name it 'fare'\n", - line 230: " # TODO 2: Use tf.keras.models.Model and create your model with inputs and output\n", - line 256: "# TODO 3: Use tf.keras.utils.plot_model() to create a dnn_model.png of your architecture\n", - line 288: "# TODO 4: Pass in the correct parameters to train your model\n", - line 431: "# TODO 5: Create the model using gcloud ai-platform predict\n", courses/developingapps/v1.2/python/pubsub-languageapi-spanner/start/quiz/console/worker.py (9 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from - line 45: # TODO: Acknowledge the message - line 53: # TODO: Log the message - line 61: # TODO: Use the languageapi module to - line 68: # TODO: Log the sentiment score - line 74: # TODO: Assign the sentiment score to - line 81: # TODO: Use the spanner module to save the feedback - line 87: # TODO: Log a message to say the feedback - line 102: # TODO: Register the callback courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/tensors-variables.ipynb (9 lines): - line 182: "# TODO 1a\n", - line 337: "# TODO 1b\n", - line 365: "# TODO 1c\n", - line 508: "# TODO 1d\n", - line 1031: "# TODO 2a\n", - line 1340: "# TODO 2b\n", - line 1667: "# TODO 2c\n", - line 2095: "# TODO 2d\n", - line 2157: "# TODO 3a\n", courses/machine_learning/deepdive2/structured/labs/4a_sample_babyweight.ipynb (9 lines): - line 106: "# TODO: Change environment variables\n", - line 539: "every_n = # TODO: Experiment with values to get close to target counts\n", - line 541: "# TODO: Replace FUNC with correct function to split with\n", - line 542: "# TODO: Replace COLUMN with correct column to split on\n", - line 669: " # TODO: Filter out what we don\"t want to use for training\n", - line 672: " # TODO: Modify plurality field to be a string\n", - line 675: " # TODO: Clone data and mask certain columns to simulate lack of ultrasound\n", - line 677: " # TODO: Modify is_male\n", - line 679: " # TODO: Modify plurality\n", courses/developingapps/v1.3/python/appengine/end/function/index.js (9 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 42: // TODO: Log the sentiment score - line 48: // TODO: Add new score property to feedbackObject - line 54: // TODO: Pass feedback object to the next handler - line 59: }) // TODO: insert record - line 62: // TODO: Log and return success - line 69: }).catch(console.error); // TODO: Log error courses/developingapps/python/appengine/end/function/index.js (9 lines): - line 14: // TODO: Load the ./languageapi module - line 20: // TODO: Load the ./spanner module - line 31: // TODO: Decode the Cloud Pub/Sub message - line 42: // TODO: Log the sentiment score - line 48: // TODO: Add new score property to feedbackObject - line 54: // TODO: Pass feedback object to the next handler - line 59: }) // TODO: insert record - line 62: // TODO: Log and return success - line 69: }).catch(console.error); // TODO: Log error courses/developingapps/v1.2/python/datastore/start/quiz/gcp/datastore.py (9 lines): - line 15: # TODO: Import the os module - line 21: # TODO: Get the GCLOUD_PROJECT environment variable - line 29: # TODO: Import the datastore module from the google.cloud package - line 35: # TODO: Create a Cloud Datastore client object - line 62: # TODO: Create a key for a Datastore entity whose kind is Question - line 68: # TODO: 
Create a Datastore entity object using the key - line 74: # TODO: Iterate over the form values supplied to the function - line 80: # TODO: Assign each key and value to the Datastore entity - line 87: # TODO: Save the entity courses/machine_learning/deepdive2/structured/labs/5b_deploy_keras_ai_platform_babyweight.ipynb (9 lines): - line 78: "PROJECT = \"cloud-training-demos\" # TODO: Replace with your PROJECT\n", - line 80: "REGION = \"us-central1\" # TODO: Replace with your REGION" - line 119: "!gsutil cp -r ../babyweight gs:// # TODO: Replace with your bucket-name" - line 162: "MODEL_LOCATION=# TODO: Add GCS path to saved_model.pb file.\n", - line 202: "MODEL_NAME = # TODO: Add model name\n", - line 203: "MODEL_VERSION = # TODO: Add model version\n", - line 229: " # TODO: Create another instance\n", - line 280: " --version=# TODO: Add model version" - line 310: " --version=# TODO: Add model version" courses/developingapps/v1.3/python/datastore/start/quiz/gcp/datastore.py (9 lines): - line 15: # TODO: Import the os module - line 21: # TODO: Get the GCLOUD_PROJECT environment variable - line 29: # TODO: Import the datastore module from the google.cloud package - line 35: # TODO: Create a Cloud Datastore client object - line 62: # TODO: Create a key for a Datastore entity whose kind is Question - line 68: # TODO: Create a Datastore entity object using the key - line 74: # TODO: Iterate over the form values supplied to the function - line 80: # TODO: Assign each key and value to the Datastore entity - line 87: # TODO: Save the entity courses/developingapps/v1.3/python/pubsub-languageapi-spanner/start/quiz/console/worker.py (9 lines): - line 20: # TODO: Load the pubsub, languageapi and spanner modules from - line 45: # TODO: Acknowledge the message - line 53: # TODO: Log the message - line 61: # TODO: Use the languageapi module to - line 68: # TODO: Log the sentiment score - line 74: # TODO: Assign the sentiment score to - line 81: # TODO: Use the spanner module to save the feedback - line 87: # TODO: Log a message to say the feedback - line 102: # TODO: Register the callback quests/endtoendml/labs/3_keras_dnn.ipynb (9 lines): - line 21: "__TODO__: Complete the lab notebook #TODO sections. You can refer to the [../solutions/3_keras_dnn.ipynb](solutions/) notebook for reference. \n" - line 158: " # TODO create the dataset\n", - line 162: " # TODO add shuffling to the dataset if it's in training mode:\n", - line 198: " for colname in [] # TODO complete array of numeric input columns\n", - line 202: " for colname in [] # TODO complete array of string input columns \n", - line 208: " for colname in [] # TODO complete array of numeric feature columns\n", - line 217: " # TODO: Use the feature columns and inputs above to create a DNN of [64, 32]\n", - line 421: "# TODO create model on Cloud AI Platform. 
Use python-version 3.5 and runtime-version 1.14\n", - line 424: "gcloud ai-platform versions create # TODO complete the statement" courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/quiz/gcp/pubsub.py (8 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 46: # TODO: Create a Subscription object named - line 63: # TODO: Publish the feedback object to the feedback topic - line 79: # TODO: Subscriber to the worker-subscription, - line 108: # TODO: Subscriber to the worker-subscription, courses/machine_learning/deepdive2/building_production_ml_systems/labs/2_hyperparameter_tuning_vertex.ipynb (8 lines): - line 406: "# TODO 1\n", - line 407: "hpt = # TODO: Your code goes here\n", - line 413: " # TODO: Your code goes here\n", - line 650: " - metricId: # TODO: Your code goes here\n", - line 654: " # TODO: Your code goes here\n", - line 656: " # TODO: Your code goes here\n", - line 658: " # TODO: Your code goes here\n", - line 698: " # TODO: Your code goes here" courses/developingapps/python/firebase/end/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/developingapps/python/cloudstorage/end/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive/01_bigquery/labs/a_sample_explore_clean.ipynb (8 lines): - line 200: "# TODO: Your code goes here" - line 231: "Store the results of the query you created in the previous TODO above in a Pandas DataFrame called `trips`.\n", - line 245: "bq = # TODO: Your code goes here\n", - line 248: "# TODO: Your code goes here\n", - line 251: "trips = # TODO: Your code goes here" - line 376: "# TODO: Your code goes here" - line 453: " TODO: Your code goes here\n", - line 454: " TODO: Your code goes here\n", courses/developingapps/v1.2/python/cloudstorage/end/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive/10_recommend/endtoend/airflow/dags/training.py (8 lines): - line 52: # TODO: Specify your BigQuery dataset name and table name - line 57: # TODO: Confirm bucket name and region - line 81: # TODO: Specify a schedule 
interval in CRON syntax to run once a day at 2100 hours (9pm) - line 85: # TODO: Title your DAG to be recommendations_training_v1 - line 122: # TODO: Complete the BigQueryOperator task to truncate the table if it already exists before writing - line 133: # TODO: Fill in the missing operator name for task #2 which - line 156: # TODO: Fill in the missing operator name for task #3 which will - line 184: # TODO: Be sure to set_upstream dependencies for all tasks courses/developingapps/python/firebase/start/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/original_1_training_at_scale_vertex.ipynb (8 lines): - line 461: "# TODO 1 and TODO 2\n", - line 814: " # TODO 1a\n", - line 822: " # TODO 1b\n", - line 1073: "# TODO 3\n", - line 1227: "# TODO 4\n", - line 1288: "# TODO 5\n", - line 1504: "# TODO 3\n", - line 1548: "TODO: To submit to the Cloud we use [`gcloud ai custom-jobs create`](https://cloud.google.com/sdk/gcloud/reference/ai/custom-jobs/create) and simply specify some additional parameters for Vertex AI Training Service:\n", quests/endtoendml/solutions/labs/3_tensorflow.ipynb (8 lines): - line 164: " # TODO #1: Use tf.decode_csv to parse the provided line\n", - line 165: " # TODO #2: Make a Python dict. The keys are the column names, the values are from the parsed data\n", - line 166: " # TODO #3: Return a tuple of features, label where features is a Python dict and label a float\n", - line 169: " # TODO #4: Use tf.gfile.Glob to create list of files that match pattern\n", - line 176: " # TODO #5: In training mode, shuffle the dataset and repeat indefinitely\n", - line 280: " # TODO #1: Create your estimator\n", - line 283: " # TODO #2: Call read_dataset passing in the training CSV file and the appropriate mode\n", - line 288: " # TODO #3: Call read_dataset passing in the evaluation CSV file and the appropriate mode\n", courses/developingapps/python/pubsub-languageapi-spanner/bonus/quiz/gcp/pubsub.py (8 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 46: # TODO: Create a Subscription object named worker-subscription - line 62: # TODO: Publish the feedback object to the feedback topic - line 78: # TODO: Subscriber to the worker-subscription, - line 107: # TODO: Subscriber to the worker-subscription, courses/machine_learning/deepdive2/production_ml/labs/comparing_pipeline_runs.ipynb (8 lines): - line 304: "# TODO 1\n", - line 306: "# TODO 1: Your code goes here" - line 775: "# TODO 2\n", - line 777: "compiler.Compiler().compile(# TODO 2: Your code goes here)" - line 1256: "# TODO 3\n", - line 1271: " # TODO 3: Your code goes here" - line 1511: "# TODO 4\n", - line 1514: "job = # TODO 4: Your code goes here\n", courses/developingapps/v1.3/python/cloudstorage/start/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: 
Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 50: # TODO: Use the bucket to get a blob object - line 56: # TODO: Use the blob to upload the file - line 63: # TODO: Make the object public - line 71: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/spam_comments_model_maker.ipynb (8 lines): - line 158: "# TODO 1\n", - line 163: "spec = # TODO 1: Your code goes here\n", - line 208: "# TODO 2\n", - line 210: "data = # TODO 2: Your code goes here(\n", - line 256: "# TODO 3\n", - line 258: "model = # TODO 3: Your code goes here" - line 436: "# TODO 4\n", - line 439: "# TODO 4: Your code goes here" courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/gapic-vizier-multi-objective-optimization.ipynb (8 lines): - line 314: "metric_y1 = # TODO -- Your code goes here\n", - line 317: "metric_y2 = # TODO -- Your code goes here\n", - line 363: "vizier_client = # TODO -- Your code goes here(\n", - line 409: " y1 = # TODO -- Your code goes here(r, theta)\n", - line 410: " y2 = # TODO -- Your code goes here(r, theta)\n", - line 531: " # TODO -- Your code goes here(\n", - line 535: " \"metrics\": # TODO -- Your code goes here(suggested_trial.name, r, theta)\n", - line 702: "optimal_trials = # TODO -- Your code goes here({\"parent\": STUDY_ID})\n", courses/machine_learning/deepdive2/end_to_end_ml/solutions/deploy_keras_ai_platform_babyweight.ipynb (8 lines): - line 96: "PROJECT = \"your-project-name-here\" # TODO 1 Replace with your PROJECT\n", - line 98: "REGION = \"us-central1\" # TODO 1 Replace with your REGION" - line 223: "MODEL_LOCATION=$(gsutil ls -ld -- gs://${BUCKET}/babyweight/trained_model/2* | tail -1 | tr -d '[:space:]') # TODO 2\n", - line 269: "MODEL_NAME = \"babyweight\" # TODO 3a\n", - line 270: "MODEL_VERSION = \"ml_on_gcp\" # TODO 3a\n", - line 297: " # TODO 3a\n", - line 378: " --version=ml_on_gcp # TODO 3b" - line 434: " --version=ml_on_gcp # TODO 4" courses/developingapps/v1.2/python/firebase/end/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/machine_learning/asl/01_bigquery/labs/a_sample_explore_clean.ipynb (8 lines): - line 200: "# TODO: Your code goes here" - line 231: "Store the results of the query you created in the previous TODO above in a Pandas DataFrame called `trips`.\n", - line 245: "bq = # TODO: Your code goes here\n", - line 248: "# TODO: Your code goes here\n", - line 251: "trips = # TODO: Your code goes here" - line 376: "# TODO: Your code goes here" - line 453: " TODO: Your code goes here\n", - line 454: " TODO: Your code goes here\n", courses/developingapps/python/cloudstorage/start/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 50: # TODO: Use the bucket to get a blob object - line 56: # TODO: Use the blob to 
upload the file - line 63: # TODO: Make the object public - line 71: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive2/building_production_ml_systems/labs/4b_streaming_data_inference.ipynb (8 lines): - line 205: "**TODO:** Open the file ./taxicab_traffic/streaming_count.py and find the TODO there. Specify a sliding window that is 5 minutes long, and gets recalculated every 15 seconds. Hint: Reference the [beam programming guide](https://beam.apache.org/documentation/programming-guide/#windowing) for guidance. To check your answer reference the solution. \n", - line 299: "# TODO 2a. Write a function to take most recent entry in `traffic_realtime` table and add it to instance.\n", - line 303: " TODO: Your code goes here\n", - line 306: " instance['traffic_last_5min'] = # TODO: Your code goes here.\n", - line 354: "# TODO 2b. Write code to call prediction on instance using realtime traffic info.\n", - line 373: "instance = # TODO: Your code goes here.\n", - line 375: "response = # TODO: Your code goes here.\n", - line 380: " print( # TODO: Your code goes here" courses/developingapps/v1.3/python/firebase/start/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/developingapps/v1.2/python/cloudstorage/start/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 50: # TODO: Use the bucket to get a blob object - line 56: # TODO: Use the blob to upload the file - line 63: # TODO: Make the object public - line 71: # TODO: Modify to return the blob's Public URL courses/developingapps/v1.3/python/kubernetesengine/bonus/frontend/quiz/gcp/pubsub.py (8 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 46: # TODO: Create a Subscription object named worker-subscription - line 62: # TODO: Publish the feedback object to the feedback topic - line 78: # TODO: Subscriber to the worker-subscription, - line 107: # TODO: Subscriber to the worker-subscription, courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/quiz/gcp/pubsub.py (8 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 46: # TODO: Create a Subscription object named - line 63: # TODO: Publish the feedback object to the feedback topic - line 79: # TODO: Subscriber to the worker-subscription, - line 108: # TODO: Subscriber to the worker-subscription, courses/machine_learning/deepdive/10_recommend/endtoend/endtoend.ipynb (8 lines): - line 577: "# TODO: Specify your BigQuery dataset name and table name\n", - line 582: "# TODO: Confirm bucket name and region\n", - line 
606: "# TODO: Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm)\n", - line 610: "# TODO: Title your DAG to be recommendations_training_v1\n", - line 647: "# TODO: Complete the BigQueryOperator task to truncate the table if it already exists before writing\n", - line 658: "# TODO: Fill in the missing operator name for task #2 which\n", - line 681: "# TODO: Fill in the missing operator name for task #3 which will\n", - line 709: "# TODO: Be sure to set_upstream dependencies for all tasks\n", courses/developingapps/v1.2/python/kubernetesengine/bonus/frontend/quiz/gcp/pubsub.py (8 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 46: # TODO: Create a Subscription object named worker-subscription - line 62: # TODO: Publish the feedback object to the feedback topic - line 78: # TODO: Subscriber to the worker-subscription, - line 107: # TODO: Subscriber to the worker-subscription, courses/machine_learning/deepdive/10_recommend/labs/endtoend/airflow/dags/training.py (8 lines): - line 52: # TODO: Specify your BigQuery dataset name and table name - line 57: # TODO: Confirm bucket name and region - line 81: # TODO: Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm) - line 85: # TODO: Title your DAG to be recommendations_training_v1 - line 122: # TODO: Complete the BigQueryOperator task to truncate the table if it already exists before writing - line 133: # TODO: Fill in the missing operator name for task #2 which - line 156: # TODO: Fill in the missing operator name for task #3 which will - line 184: # TODO: Be sure to set_upstream dependencies for all tasks courses/developingapps/v1.2/python/firebase/start/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/developingapps/v1.3/python/cloudstorage/end/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive2/production_ml/labs/custom_training_tensorboard_profiler.ipynb (8 lines): - line 382: "# TODO 1\n", - line 384: "# TODO 1: Your code goes here" - line 663: "# TODO 2\n", - line 664: "tensorboard = # TODO 2: Your code goes here" - line 1029: "# TODO 3\n", - line 1031: "# TODO 3: Your code goes here" - line 1142: "# TODO 4\n", - line 1145: "# TODO 4: Your code goes here\n", courses/machine_learning/deepdive2/end_to_end_ml/labs/deploy_keras_ai_platform_babyweight.ipynb (8 lines): - line 87: "PROJECT = \"cloud-training-demos\" # TODO 1: Replace with your PROJECT\n", - line 89: "REGION = \"us-central1\" # TODO 1: 
Replace with your REGION" - line 214: "MODEL_LOCATION=# TODO 2: Add GCS path to saved_model.pb file.\n", - line 262: "MODEL_NAME = # TODO 3a: Add model name\n", - line 263: "MODEL_VERSION = # TODO 3a: Add model version\n", - line 289: " # TODO 3a: Create another instance\n", - line 365: " --version=# TODO 3b: Add model version" - line 421: " --version=# TODO 4: Add model version" courses/developingapps/v1.3/python/firebase/end/quiz/gcp/storage.py (8 lines): - line 18: # TODO: Get the Bucket name from the GCLOUD_BUCKET environment variable - line 24: # TODO: Import the storage module - line 30: # TODO: Create a client for Cloud Storage - line 36: # TODO: Use the client to get the Cloud Storage bucket - line 48: # TODO: Use the bucket to get a blob object - line 54: # TODO: Use the blob to upload the file - line 62: # TODO: Make the object public - line 70: # TODO: Modify to return the blob's Public URL courses/machine_learning/deepdive/06_structured/labs/3_tensorflow.ipynb (8 lines): - line 164: " # TODO #1: Use tf.decode_csv to parse the provided line\n", - line 165: " # TODO #2: Make a Python dict. The keys are the column names, the values are from the parsed data\n", - line 166: " # TODO #3: Return a tuple of features, label where features is a Python dict and label a float\n", - line 169: " # TODO #4: Use tf.gfile.Glob to create list of files that match pattern\n", - line 176: " # TODO #5: In training mode, shuffle the dataset and repeat indefinitely\n", - line 280: " # TODO #1: Create your estimator\n", - line 283: " # TODO #2: Call read_dataset passing in the training CSV file and the appropriate mode\n", - line 288: " # TODO #3: Call read_dataset passing in the evaluation CSV file and the appropriate mode\n", courses/machine_learning/deepdive2/image_classification/labs/3_tf_hub_transfer_learning.ipynb (8 lines): - line 191: "**TODO 1.a:** Run the `decode_img` function and plot it to see a happy looking daisy." - line 205: "# TODO: decode image and plot it" - line 240: "**TODO 1.b:** Augment the image using the random functions." - line 257: " # TODO: augment the image.\n", - line 322: "**TODO 1.c:** Run the below cell repeatedly to see the results of different batches. The images have been un-normalized for human eyes. Can you tell what type of flowers they are? Is it fair for the AI to learn on?" - line 354: "**TODO 2.a** Copy over the most accurate model from 2_mnist_models.ipynb or build a new CNN Keras model." - line 376: " # TODO: Add your image model.\n", - line 416: "**TODO 2.b**: Add a Hub Keras Layer at the top of the model using the handle provided." 
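The 3_tf_hub_transfer_learning.ipynb TODOs above culminate in adding a Hub Keras layer at the top of the model. A minimal sketch of that step, assuming one public MobileNet v2 feature-vector handle and an illustrative five-class flower head (the notebook may use a different module, head size, or training setup):

    import tensorflow as tf
    import tensorflow_hub as hub

    # One publicly available feature-vector module (an assumption, not
    # necessarily the handle the notebook provides)
    HANDLE = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4"

    model = tf.keras.Sequential([
        # Frozen Hub feature extractor at the top of the model
        hub.KerasLayer(HANDLE, trainable=False, input_shape=(224, 224, 3)),
        # Classification head; 5 matches the flowers dataset's class count
        tf.keras.layers.Dense(5, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
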
courses/developingapps/python/kubernetesengine/bonus/frontend/quiz/gcp/pubsub.py (8 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 46: # TODO: Create a Subscription object named worker-subscription - line 62: # TODO: Publish the feedback object to the feedback topic - line 78: # TODO: Subscriber to the worker-subscription, - line 107: # TODO: Subscriber to the worker-subscription, courses/machine_learning/deepdive/10_recommend/labs/endtoend/endtoend.ipynb (8 lines): - line 577: "# TODO: Specify your BigQuery dataset name and table name\n", - line 582: "# TODO: Confirm bucket name and region\n", - line 606: "# TODO: Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm)\n", - line 610: "# TODO: Title your DAG to be recommendations_training_v1\n", - line 647: "# TODO: Complete the BigQueryOperator task to truncate the table if it already exists before writing\n", - line 658: "# TODO: Fill in the missing operator name for task #2 which\n", - line 681: "# TODO: Fill in the missing operator name for task #3 which will\n", - line 709: "# TODO: Be sure to set_upstream dependencies for all tasks\n", courses/machine_learning/deepdive2/time_series_prediction/labs/3_modeling_bqml.ipynb (8 lines): - line 160: " # TODO: Your code goes here\n", - line 163: " # TODO: Your code goes here\n", - line 167: " # TODO: Your code goes here" - line 198: " # TODO: Your code goes here.\n", - line 228: " # TODO: Your code goes here" - line 320: " # TODO: Your code goes here\n", - line 323: " # TODO: Your code goes here\n", - line 327: " # TODO: Your code goes here" courses/developingapps/v1.2/python/pubsub-languageapi-spanner/end/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create Topic Object to reference feedback topic - line 39: # TODO: Create a Pub/Sub Subscriber Client - line 45: # TODO: Create a Subscription object named - line 61: # TODO: Publish the feedback object to the feedback topic - line 78: # TODO: Subscriber to the worker-subscription, courses/developingapps/v1.3/python/appengine/end/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/developingapps/v1.2/python/appengine/end/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/machine_learning/deepdive2/structured/labs/1a_explore_data_babyweight.ipynb (7 lines): - line 248: "# TODO: Create function that gets distinct value statistics from BigQuery\n", - line 280: "
# TODO: Reusing the get_distinct_values function you just implemented, create function that plots distinct value statistics from BigQuery\n", - line 297: "# TODO: Create function that plots distinct value statistics from BigQuery\n", - line 330: "# TODO: Plot is_male" - line 353: "# TODO: Plot mother_age" - line 376: "# TODO: Plot plurality" - line 399: "# TODO: Plot gestation_weeks" courses/developingapps/v1.2/python/kubernetesengine/bonus/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/python/kubernetesengine/bonus/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/machine_learning/deepdive/10_recommend/labs/composer_gcf_trigger/simple_load_dag.py (7 lines): - line 54: # TODO: Populate the models.Variable.get() with the actual variable name for your output bucket - line 68: # TODO: Populate the models.Variable.get() with the variable name for your GCP Project - line 73: # TODO: Populate the models.Variable.get() with the variable name for temp location - line 115: # TODO: Name the DAG id GcsToBigQueryTriggered - line 123: # TODO: Populate the models.Variable.get() with the variable name for BQ table - line 126: # TODO: Populate the models.Variable.get() with the variable name for input field names - line 132: # TODO: Specify the type of operator we need to call to invoke DataFlow courses/developingapps/python/appengine/end/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/developingapps/v1.2/python/kubernetesengine/end/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/v1.3/python/pubsub-languageapi-spanner/end/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create Topic Object to reference feedback topic - line 39: # TODO: Create a Pub/Sub Subscriber Client - line 45: # TODO: Create a Subscription object named - line 61: # TODO: Publish the feedback object to the feedback topic - line 78: # TODO: 
Subscriber to the worker-subscription, courses/developingapps/python/appengine/end/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/v1.3/python/kubernetesengine/bonus/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/python/kubernetesengine/end/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/v1.2/python/pubsub-languageapi-spanner/end/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 66: # TODO: Create a key for the record - line 76: # TODO: Use the batch to insert a record courses/developingapps/v1.3/python/kubernetesengine/start/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/developingapps/v1.3/python/pubsub-languageapi-spanner/start/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create Topic Object to reference feedback topic - line 39: # TODO: Create a Pub/Sub Subscriber Client - line 45: # TODO: Create a Subscription object named - line 61: # TODO: Publish the feedback object to the feedback topic - line 74: # TODO: Subscriber to the worker-subscription, courses/developingapps/python/kubernetesengine/end/backend/start/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record 
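The spanner.py TODOs above repeat a single pattern across the course variants: create a client, reference quiz-instance and quiz-database, then insert a record inside a batch. A minimal sketch under those names; the feedback table, its column list, and the key construction are illustrative assumptions:

    from google.cloud import spanner

    spanner_client = spanner.Client()
    instance = spanner_client.instance('quiz-instance')
    database = instance.database('quiz-database')

    def save_feedback(data):
        # A batch groups mutations and commits them when the block exits
        with database.batch() as batch:
            # Illustrative primary key built from the submitter and quiz
            feedback_id = '{}_{}'.format(data['email'], data['quiz'])
            batch.insert(
                table='feedback',
                columns=('feedbackId', 'email', 'quiz', 'feedback', 'score'),
                values=[(feedback_id, data['email'], data['quiz'],
                         data['feedback'], data['score'])],
            )
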
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/machine_learning/deepdive2/launching_into_ml/labs/bigquery.ipynb (7 lines): - line 389: " dataset = # TODO -- Your code goes here\n", - line 416: " load_job = # TODO -- Your code goes here(\n", - line 470: "TODO: replace \\ with your PROJECT_ID\n", - line 714: " dataset = # TODO -- Your code goes here\n", - line 716: " transformed_ds = # TODO -- Your code goes here(transform_row)\n", - line 977: "# TODO -- Your code goes here(training_ds, epochs=5)" - line 1024: "loss, accuracy = # TODO -- Your code goes here(eval_ds)\n", courses/developingapps/v1.3/python/kubernetesengine/end/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/developingapps/python/pubsub-languageapi-spanner/bonus/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/v1.2/python/appengine/start/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/v1.2/python/kubernetesengine/start/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/spanner/SpannerService.java (7 lines): - line 32: // TODO: Get a reference to the Spanner API - line 39: // TODO: Get a reference to the quiz-instance and its quiz-database - line 45: // TODO: Get a client for the quiz-database - line 51: // TODO: Create a list to hold mutations against the database - line 57: // TODO: Add an insert mutation - line 60: // TODO: Build a new insert mutation - line 72: // TODO: Write the change to 
Spanner courses/machine_learning/deepdive/10_recommend/labs/composer_gcf_trigger/composertriggered.ipynb (7 lines): - line 232: "# TODO: Populate the models.Variable.get() with the actual variable name for your output bucket\n", - line 245: " # TODO: Populate the models.Variable.get() with the variable name for your GCP Project\n", - line 250: " # TODO: Populate the models.Variable.get() with the variable name for temp location\n", - line 292: "# TODO: Name the DAG id GcsToBigQueryTriggered\n", - line 300: " # TODO: Populate the models.Variable.get() with the variable name for BQ table\n", - line 303: " # TODO: Populate the models.Variable.get() with the variable name for input field names\n", - line 309: " # TODO: Specify the type of operator we need to call to invoke DataFlow\n", courses/developingapps/python/kubernetesengine/start/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/machine_learning/deepdive2/production_ml/labs/keras.ipynb (7 lines): - line 165: "# TODO: Your code goes here\n", - line 241: "# TODO: Your code goes here\n" - line 419: "# TODO: Your code goes here\n" - line 456: "# TODO: Your code goes here\n", - line 458: "# TODO: Your code goes here\n" - line 543: "# TODO: Your code goes here\n" - line 875: "# TODO: Your code goes here\n" courses/developingapps/v1.3/python/appengine/end/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/python/kubernetesengine/start/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/developingapps/v1.2/python/kubernetesengine/start/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/developingapps/python/kubernetesengine/end/backend/start/frontend/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a 
Subscription object named worker-subscription - line 60: # TODO: Publish the feedback object to the feedback topic - line 76: # TODO: Subscriber to the worker-subscription, courses/machine_learning/asl/03_model_performance/labs/c_custom_keras_estimator.ipynb (7 lines): - line 210: " # TODO: Your code goes here\n", - line 211: " # TODO: Your code goes here\n", - line 212: " # TODO: Your code goes here\n", - line 213: " # TODO: Your code goes here\n", - line 214: " # TODO: Your code goes here\n", - line 215: " # TODO: Your code goes here\n", - line 290: " # TODO: Your code goes here\n", courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/quickstart.ipynb (7 lines): - line 301: "# TODO 1\n", - line 308: "# TODO - Your code goes here" - line 407: "# TODO 2\n", - line 419: "task = # TODO - Your code goes here" - line 457: "# TODO 3\n", - line 463: "# TODO - Your code goes here\n", - line 466: "index = # TODO - Your code goes here\n", courses/developingapps/v1.2/python/pubsub-languageapi-spanner/start/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a reference to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 66: # TODO: Create a key for the record - line 74: # TODO: Use the batch to insert a record quests/serverlessml/05_feateng/labs/feateng_bqml.ipynb (7 lines): - line 133: "# placeholder for additional filters as part of TODO 3 later" - line 152: "# TODO 1: Specify the BigQuery ML options for a linear model to predict fare amount\n", - line 191: "# TODO 2: Evaluate and predict with the linear model\n", - line 204: "### TODO 3: Apply transformations using SQL to prune the taxi cab dataset\n", - line 275: " # TODO 4: Create a feature cross for day-hour combination using SQL \n", - line 333: "# TODO 5: Set the model options for a linear regression model to predict fare amount with 0.1 L2 Regularization\n", - line 472: "# TODO 6: Create a DNN model (dnn_regressor) with hidden_units [32,8]\n", courses/developingapps/v1.3/python/kubernetesengine/start/backend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/python/appengine/start/frontend/quiz/gcp/spanner.py (7 lines): - line 17: # TODO: Import the spanner module - line 27: # TODO: Create a spanner Client - line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance - line 40: # TODO: Get a referent to the Cloud Spanner quiz-database - line 60: # TODO: Create a batch object for database operations - line 63: # TODO: Create a key for the record - line 72: # TODO: Use the batch to insert a record courses/developingapps/python/pubsub-languageapi-spanner/end/quiz/gcp/pubsub.py (7 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Publisher Client - line 33: # TODO: Create a Pub/Sub Subscriber Client - line 39: # TODO: Create a Topic Object to reference the feedback topic - line 45: # TODO: Create a Subscription object named worker-subscription - line 60: # TODO: Publish the feedback 
courses/developingapps/python/kubernetesengine/end/frontend/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/developingapps/v1.3/python/kubernetesengine/end/backend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/v1.2/python/appengine/end/frontend/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/developingapps/v1.2/python/kubernetesengine/bonus/frontend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/v1.2/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/spanner/SpannerService.java (7 lines):
- line 32: // TODO: Get a reference to the Spanner API
- line 39: // TODO: Get a reference to the quiz-instance and its quiz-database
- line 45: // TODO: Get a client for the quiz-database
- line 51: // TODO: Create a list to hold mutations against the database
- line 57: // TODO: Add an insert mutation
- line 60: // TODO: Build a new insert mutation
- line 72: // TODO: Write the change to Spanner
courses/developingapps/v1.3/python/appengine/start/frontend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/machine_learning/deepdive/03_model_performance/labs/c_custom_keras_estimator.ipynb (7 lines):
- line 210: "  # TODO: Your code goes here\n",
- line 211: "  # TODO: Your code goes here\n",
- line 212: "  # TODO: Your code goes here\n",
- line 213: "  # TODO: Your code goes here\n",
- line 214: "  # TODO: Your code goes here\n",
- line 215: "  # TODO: Your code goes here\n",
- line 290: "  # TODO: Your code goes here\n",
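The recurring spanner.py TODO sequence above resolves to a handful of google-cloud-spanner calls. A minimal sketch, assuming the course's quiz-instance/quiz-database names; the feedback table and its column names are guesses, not the lab's published schema:

    from google.cloud import spanner

    # Create a Spanner client, then get references to quiz-instance and quiz-database
    spanner_client = spanner.Client()
    instance = spanner_client.instance("quiz-instance")
    database = instance.database("quiz-database")

    def save_feedback(data):
        # The batch context manager commits all queued operations on exit
        with database.batch() as batch:
            # Create a key for the record (field choices are assumptions)
            feedback_id = "{}_{}_{}".format(data["email"], data["quiz"], data["timestamp"])
            # Use the batch to insert a record
            batch.insert(
                table="feedback",
                columns=("feedbackId", "email", "quiz", "feedback",
                         "rating", "score", "timestamp"),
                values=[(feedback_id, data["email"], data["quiz"], data["feedback"],
                         data["rating"], data["score"], data["timestamp"])],
            )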
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/load_diff_filedata.ipynb (7 lines):
- line 176: "  # TODO 1 \n",
- line 205: "  # TODO 2 \n",
- line 837: "# TODO 1\n",
- line 853: "# TODO 2\n",
- line 1142: "# TODO 1\n",
- line 1237: "# TODO 1\n",
- line 1266: "# TODO 2\n",
courses/developingapps/python/pubsub-languageapi-spanner/start/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 71: # TODO: Use the batch to insert a record
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/start/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create Topic Object to reference feedback topic
- line 39: # TODO: Create a Pub/Sub Subscriber Client
- line 45: # TODO: Create a Subscription object named
- line 61: # TODO: Publish the feedback object to the feedback topic
- line 74: # TODO: Subscriber to the worker-subscription,
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/python/kubernetesengine/end/backend/start/frontend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/end/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 66: # TODO: Create a key for the record
- line 76: # TODO: Use the batch to insert a record
courses/machine_learning/deepdive2/time_series_prediction/solutions/4_modeling_keras.ipynb (7 lines):
- line 501: "# TODO 1a\n",
- line 577: "# TODO 1b\n",
- line 649: "# TODO 1c\n",
- line 731: "# TODO 2a\n",
- line 803: "# TODO 2b\n",
- line 881: "# TODO 3a\n",
- line 957: "# TODO 3b\n",
courses/developingapps/v1.2/python/appengine/start/frontend/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/machine_learning/deepdive/10_recommend/composer_gcf_trigger/simple_load_dag.py (7 lines):
- line 53: # TODO: Populate the models.Variable.get() with the actual variable name for your output bucket
- line 66: # TODO: Populate the models.Variable.get() with the variable name for your GCP Project
- line 71: # TODO: Populate the models.Variable.get() with the variable name for temp location
- line 113: # TODO: Name the DAG id GcsToBigQueryTriggered
- line 121: # TODO: Populate the models.Variable.get() with the variable name for BQ table
- line 124: # TODO: Populate the models.Variable.get() with the variable name for input field names
- line 130: # TODO: Specify the type of operator we need to call to invoke DataFlow
courses/developingapps/python/kubernetesengine/bonus/frontend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/python/pubsub-languageapi-spanner/start/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/developingapps/v1.3/python/appengine/start/frontend/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/developingapps/python/kubernetesengine/start/backend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/machine_learning/deepdive/10_recommend/composer_gcf_trigger/composertriggered.ipynb (7 lines):
- line 232: "# TODO: Populate the models.Variable.get() with the actual variable name for your output bucket\n",
- line 245: "  # TODO: Populate the models.Variable.get() with the variable name for your GCP Project\n",
- line 250: "  # TODO: Populate the models.Variable.get() with the variable name for temp location\n",
- line 292: "# TODO: Name the DAG id GcsToBigQueryTriggered\n",
- line 300: "  # TODO: Populate the models.Variable.get() with the variable name for BQ table\n",
- line 303: "  # TODO: Populate the models.Variable.get() with the variable name for input field names\n",
- line 309: "  # TODO: Specify the type of operator we need to call to invoke DataFlow\n",
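The Composer TODOs in simple_load_dag.py and composertriggered.ipynb all reduce to Airflow Variable lookups plus one Dataflow operator. A sketch under assumed variable names and an assumed py_file path (the labs define their own); the contrib import path matches the Airflow 1.10 releases these labs targeted:

    from airflow import models
    from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator
    from airflow.utils.dates import days_ago

    # models.Variable.get() pulls values set in the Composer environment;
    # the variable names here are placeholders for the lab's actual names.
    DESTINATION_BUCKET = models.Variable.get("gcs_completion_bucket")
    PROJECT = models.Variable.get("gcp_project")
    TEMP_LOCATION = models.Variable.get("gcp_temp_location")
    OUTPUT_TABLE = models.Variable.get("bq_output_table")
    INPUT_FIELD_NAMES = models.Variable.get("input_field_names")

    with models.DAG(dag_id="GcsToBigQueryTriggered",
                    schedule_interval=None,
                    default_args={"start_date": days_ago(1)}) as dag:
        # DataFlowPythonOperator is the operator type that invokes Dataflow
        dataflow_task = DataFlowPythonOperator(
            task_id="process-delimited-and-push",
            py_file="gs://{}/dataflow/process_delimited.py".format(DESTINATION_BUCKET),
            options={"output": OUTPUT_TABLE, "fields": INPUT_FIELD_NAMES,
                     "temp_location": TEMP_LOCATION, "project": PROJECT},
        )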
courses/developingapps/v1.3/python/kubernetesengine/bonus/backend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/v1.3/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/spanner/SpannerService.java (7 lines):
- line 32: // TODO: Get a reference to the Spanner API
- line 39: // TODO: Get a reference to the quiz-instance and its quiz-database
- line 45: // TODO: Get a client for the quiz-database
- line 51: // TODO: Create a list to hold mutations against the database
- line 57: // TODO: Add an insert mutation
- line 60: // TODO: Build a new insert mutation
- line 72: // TODO: Write the change to Spanner
courses/developingapps/python/pubsub-languageapi-spanner/end/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/developingapps/v1.2/python/kubernetesengine/end/frontend/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/developingapps/python/kubernetesengine/end/frontend/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 63: # TODO: Create a key for the record
- line 72: # TODO: Use the batch to insert a record
courses/machine_learning/deepdive/03_tensorflow/labs/b_estimator.ipynb (7 lines):
- line 95: "# TODO: Create an appropriate input_fn to read the training data\n",
- line 108: "# TODO: Create an appropriate input_fn to read the validation data\n",
- line 133: "# TODO: Create an appropriate prediction_input_fn\n",
- line 158: "# TODO: Create feature columns"
- line 179: "# TODO: Train a linear regression model\n",
- line 218: "# TODO: Predict from the estimator model we trained using test dataset"
- line 241: "# TODO: Copy your LinearRegressor estimator and replace with DNNRegressor. Remember to add a list of hidden units i.e. [32, 8, 2]\n"
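For the b_estimator.ipynb TODOs, the pattern is input_fn, then feature columns, then estimator. A minimal sketch assuming a pandas DataFrame with taxi-style features; the feature and label names are assumptions:

    import tensorflow as tf

    FEATURES = ["pickuplon", "pickuplat", "dropofflon", "dropofflat", "passengers"]
    LABEL = "fare_amount"

    def make_input_fn(df, num_epochs, shuffle=True):
        # An input_fn that reads training/validation data from a pandas DataFrame
        return tf.compat.v1.estimator.inputs.pandas_input_fn(
            x=df[FEATURES], y=df[LABEL],
            batch_size=128, num_epochs=num_epochs, shuffle=shuffle)

    # Feature columns: one numeric column per input feature
    feature_columns = [tf.feature_column.numeric_column(f) for f in FEATURES]

    # A LinearRegressor takes the same arguments; swapping in DNNRegressor
    # only adds the hidden_units list, e.g. [32, 8, 2]
    model = tf.estimator.DNNRegressor(hidden_units=[32, 8, 2],
                                      feature_columns=feature_columns)
    # model.train(input_fn=make_input_fn(df_train, num_epochs=10))
    # predictions = model.predict(input_fn=make_input_fn(df_test, 1, shuffle=False))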
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/start/quiz/gcp/spanner.py (7 lines):
- line 17: # TODO: Import the spanner module
- line 27: # TODO: Create a spanner Client
- line 34: # TODO: Get a reference to the Cloud Spanner quiz-instance
- line 40: # TODO: Get a reference to the Cloud Spanner quiz-database
- line 60: # TODO: Create a batch object for database operations
- line 66: # TODO: Create a key for the record
- line 74: # TODO: Use the batch to insert a record
courses/developingapps/python/appengine/start/frontend/quiz/gcp/pubsub.py (7 lines):
- line 19: # TODO: Load the Cloud Pub/Sub module
- line 27: # TODO: Create a Pub/Sub Publisher Client
- line 33: # TODO: Create a Pub/Sub Subscriber Client
- line 39: # TODO: Create a Topic Object to reference the feedback topic
- line 45: # TODO: Create a Subscription object named worker-subscription
- line 60: # TODO: Publish the feedback object to the feedback topic
- line 76: # TODO: Subscriber to the worker-subscription,
courses/machine_learning/deepdive2/how_google_does_ml/solutions/automl-tabular-classification.ipynb (6 lines):
- line 603: "# TODO 1\n",
- line 624: "# TODO 2a\n",
- line 630: "  # TODO 2b\n",
- line 689: "# TODO 3\n",
- line 739: "# TODO 4\n",
- line 809: "# TODO 5 \n",
courses/developingapps/python/pubsub-languageapi-spanner/end/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/end_to_end_ml/labs/application/main.py (6 lines):
- line 17: project = "" # TODO: Input your project name
- line 18: model_name = "" # TODO: Input your model name
- line 19: version_name = "" # TODO: Input your model version name
- line 22: # TODO: Write a formatted string to make a prediction against a CAIP deployed model.
- line 39: # TODO: complete genders mapping dictionary.
- line 44: # TODO: complete pluralities mapping dictionary.
courses/machine_learning/deepdive2/feature_engineering/solutions/mobile_gaming_feature_store.ipynb (6 lines):
- line 56: "Install additional package dependencies not installed in your notebook environment, such as {XGBoost, AdaNet, or TensorFlow Hub TODO: Replace with relevant packages for the tutorial}. Use the latest major GA version of each package."
- line 890: "# TODO 1\n",
- line 1691: "# TODO 2\n",
- line 2728: "# TODO 3\n",
- line 2914: "# TODO 4\n",
- line 3992: "# TODO 5\n",
courses/developingapps/v1.3/python/kubernetesengine/bonus/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/end_to_end_ml/solutions/application/main.py (6 lines):
- line 17: project = os.getenv("PROJECT", "asl-ml-immersion") # TODO: Input your project name
- line 18: model_name = os.getenv("MODEL_NAME", "babyweight") # TODO: Input your model name
- line 19: version_name = os.getenv("VERSION_NAME", "ml_on_gcp") # TODO: Input your model version name
- line 22: # TODO: Write a formatted string to make a prediction against a CAIP deployed model.
- line 40: # TODO: complete genders mapping dictionary.
- line 45: # TODO: complete pluralities mapping dictionary.
courses/machine_learning/deepdive2/structured/labs/3c_bqml_dnn_babyweight.ipynb (6 lines):
- line 149: "  # TODO: Add DNN options\n",
- line 154: "  # TODO: Add base features and label\n",
- line 279: "  # TODO: Add FEATURE CROSS of:\n",
- line 283: "  # TODO: Add DNN options\n",
- line 398: "  # TODO Add base features example from original dataset\n",
- line 424: "  # TODO Add base features example from simulated dataset\n",
courses/machine_learning/deepdive/01_bigquery/labs/c_extract_and_benchmark.ipynb (6 lines):
- line 196: "  # TODO: Your code goes here\n",
- line 200: "  # TODO: Your code goes here\n",
- line 237: "  query_string = # TODO: Your code goes here\n",
- line 240: "  df = # TODO: Your code goes here\n",
- line 326: "  return # TODO: Your code goes here\n",
- line 329: "  return # TODO: Your code goes here\n",
courses/developingapps/v1.2/python/appengine/start/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
quests/endtoendml/solutions/labs/5_train.ipynb (6 lines):
- line 173: "  ## TODO 1: add the new arguments here \n",
- line 356: "  ## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL\n",
- line 360: "  ## TODO 2b: change the dnn_hidden_units to NNSIZE\n",
- line 373: "  ## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE \n",
- line 374: "  ## TODO 2d: and set max_steps to TRAIN_STEPS\n",
- line 381: "  ## TODO 2e: Lastly, set steps equal to EVAL_STEPS\n",
courses/developingapps/python/kubernetesengine/bonus/backend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/python/kubernetesengine/end/backend/start/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/structured/labs/application/main.py (6 lines):
- line 17: project = "" # TODO: Input your project name
- line 18: model_name = "" # TODO: Input your model name
- line 19: version_name = "" # TODO: Input your model version name
- line 22: # TODO: Write a formatted string to make a prediction against a CAIP deployed model.
- line 39: # TODO: complete genders mapping dictionary.
- line 44: # TODO: complete pluralities mapping dictionary.
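The application/main.py variants above all share the same gaps, and their solutions files show the intent. One plausible completion; the mapping values mirror the babyweight lab's categorical encodings and are assumptions here, not authoritative:

    import os

    project = os.getenv("PROJECT", "your-project")         # TODO: Input your project name
    model_name = os.getenv("MODEL_NAME", "babyweight")     # TODO: Input your model name
    version_name = os.getenv("VERSION_NAME", "ml_on_gcp")  # TODO: Input your model version name

    # Formatted string naming the CAIP-deployed model version to predict against
    model_path = "projects/{}/models/{}/versions/{}".format(
        project, model_name, version_name)

    # genders mapping dictionary: HTML form value -> model feature value (assumed)
    genders = {"unknown": "Unknown", "male": "True", "female": "False"}
    # pluralities mapping dictionary (assumed)
    pluralities = {"1": "Single(1)", "2": "Twins(2)", "3": "Triplets(3)",
                   "4": "Quadruplets(4)", "5": "Quintuplets(5)"}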
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/2_dataset_api.ipynb (6 lines):
- line 125: "# TODO 1\n",
- line 261: "# TODO 2\n",
- line 393: "# TODO 3\n",
- line 486: "# TODO 4a\n",
- line 573: "# TODO 4b\n",
- line 652: "# TODO 4c\n",
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/sdk_custom_tabular_regression_online_explain.ipynb (6 lines):
- line 978: "job = # TODO 1: Your code goes here(\n",
- line 1282: "# TODO 2: Your code goes here"
- line 1564: "model = # TODO 3: Your code goes here(\n",
- line 1634: "  endpoint = # TODO 4a: Your code goes here(\n",
- line 1644: "  endpoint = # TODO 4b: Your code goes here(\n",
- line 1882: "prediction = # TODO 5: Your code goes here\n",
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/what_if_mortgage.ipynb (6 lines):
- line 610: "# TODO 1\n",
- line 618: "# TODO 1a\n",
- line 670: "# TODO 1b\n",
- line 691: "#### TODO 2\n",
- line 697: "#### TODO 2a\n",
- line 702: "#### TODO 2b\n",
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/custom_layers_and_models.ipynb (6 lines):
- line 207: "y = # TODO: Your code goes here\n",
- line 375: "  shape=# TODO: Your code goes here,\n",
- line 460: "mlp = # TODO: Your code goes here\n",
- line 531: "  self.activity_reg = # TODO: Your code goes here\n",
- line 734: "  acc = # TODO: Your code goes here\n",
- line 885: "config = # TODO: Your code goes here\n",
courses/developingapps/v1.3/python/kubernetesengine/bonus/backend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive/06_structured/5_train.ipynb (6 lines):
- line 170: "  ## TODO 1: add the new arguments here \n",
- line 365: "  ## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL\n",
- line 369: "  ## TODO 2b: change the dnn_hidden_units to NNSIZE\n",
- line 382: "  ## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE \n",
- line 383: "  ## TODO 2d: and set max_steps to TRAIN_STEPS\n",
- line 390: "  ## TODO 2e: Lastly, set steps equal to EVAL_STEPS\n",
courses/developingapps/v1.2/python/kubernetesengine/bonus/backend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/python/kubernetesengine/end/backend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/v1.3/python/appengine/end/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
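Every languageapi.py variant above follows the same five steps against the pre-2.0 google-cloud-language API (the "enums and types" imports date these files to that version). A minimal sketch:

    from google.cloud import language
    from google.cloud.language import enums, types

    # Create the Language API client
    lang_client = language.LanguageServiceClient()

    def analyze(text):
        # Create a Document object from the raw feedback text
        doc = types.Document(content=text, type=enums.Document.Type.PLAIN_TEXT)
        # Analyze the sentiment and return the sentiment score
        sentiment = lang_client.analyze_sentiment(document=doc).document_sentiment
        return sentiment.score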
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/sdk-custom-image-classification-batch.ipynb (6 lines):
- line 936: "job = # TODO -- Your code goes here(\n",
- line 948: "  model = # TODO -- Your code goes here(\n",
- line 957: "  model = # TODO -- Your code goes here(\n",
- line 1203: "batch_prediction_job = # TODO -- Your code goes here(\n",
- line 1371: "# TODO -- Your code goes here()\n",
- line 1374: "# TODO -- Your code goes here()\n",
courses/machine_learning/deepdive2/structured/solutions/application/main.py (6 lines):
- line 17: project = os.getenv("PROJECT", "asl-ml-immersion") # TODO: Input your project name
- line 18: model_name = os.getenv("MODEL_NAME", "babyweight") # TODO: Input your model name
- line 19: version_name = os.getenv("VERSION_NAME", "ml_on_gcp") # TODO: Input your model version name
- line 22: # TODO: Write a formatted string to make a prediction against a CAIP deployed model.
- line 40: # TODO: complete genders mapping dictionary.
- line 45: # TODO: complete pluralities mapping dictionary.
courses/developingapps/python/appengine/end/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/feature_engineering/labs/6_gapic_feature_store.ipynb (6 lines):
- line 1258: "# TODO 1a -- Your code here\n",
- line 1336: "# TODO 1b -- Your code here\n",
- line 1475: "# TODO 2a -- Read one entity per request\n"
- line 1504: "# TODO 2b -- Read multiple entities per request\n",
- line 1683: "  # TODO 3a -- Your code here\n",
- line 1687: "  # TODO 3b -- Your code here\n",
courses/machine_learning/asl/01_bigquery/labs/c_extract_and_benchmark.ipynb (6 lines):
- line 196: "  # TODO: Your code goes here\n",
- line 200: "  # TODO: Your code goes here\n",
- line 237: "  query_string = # TODO: Your code goes here\n",
- line 240: "  df = # TODO: Your code goes here\n",
- line 326: "  return # TODO: Your code goes here\n",
- line 329: "  return # TODO: Your code goes here\n",
courses/machine_learning/deepdive2/recommendation_systems/labs/als_bqml_hybrid_old.ipynb (6 lines):
- line 261: "**TODO 1**: Combine the above two queries to get the user factors and product factor for each rating."
- line 281: "  # TODO: Place the user features query here\n",
- line 285: "  # TODO: Place the product features query here\n",
- line 396: "**TODO 2**: Create a function that returns named columns from a size 16 product factor array."
- line 421: "  # TODO: Finish building this struct\n",
- line 436: "  # TODO: Finish building this struct\n",
courses/developingapps/python/kubernetesengine/bonus/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/feature_engineering/labs/mobile_gaming_feature_store.ipynb (6 lines):
- line 56: "Install additional package dependencies not installed in your notebook environment, such as {XGBoost, AdaNet, or TensorFlow Hub TODO: Replace with relevant packages for the tutorial}. Use the latest major GA version of each package."
- line 890: "bq_client = # TODO 1: Your code goes here(project=PROJECT_ID, location=LOCATION)\n",
- line 1690: "# TODO 2: Your code goes here(\n",
- line 2726: "# TODO 3: Your code goes here"
- line 2911: "deployed_model = # TODO 4: Your code goes here(\n",
- line 3988: "# TODO 5: Your code goes here(endpoint=endpoint, n_requests=1000, latency=1)"
courses/developingapps/v1.2/python/appengine/end/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/supplemental/solutions/deepconv_gan.ipynb (6 lines):
- line 262: "#TODO 1\n",
- line 342: "#TODO 1.\n",
- line 469: "#TODO 2\n",
- line 506: "#TODO 2\n",
- line 631: "# TODO 3\n",
- line 777: "# TODO 4\n",
quests/endtoendml/solutions/5_train.ipynb (6 lines):
- line 170: "  ## TODO 1: add the new arguments here \n",
- line 365: "  ## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL\n",
- line 369: "  ## TODO 2b: change the dnn_hidden_units to NNSIZE\n",
- line 382: "  ## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE \n",
- line 383: "  ## TODO 2d: and set max_steps to TRAIN_STEPS\n",
- line 390: "  ## TODO 2e: Lastly, set steps equal to EVAL_STEPS\n",
courses/machine_learning/deepdive/06_structured/labs/5_train.ipynb (6 lines):
- line 173: "  ## TODO 1: add the new arguments here \n",
- line 356: "  ## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL\n",
- line 360: "  ## TODO 2b: change the dnn_hidden_units to NNSIZE\n",
- line 373: "  ## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE \n",
- line 374: "  ## TODO 2d: and set max_steps to TRAIN_STEPS\n",
- line 381: "  ## TODO 2e: Lastly, set steps equal to EVAL_STEPS\n",
courses/machine_learning/deepdive2/how_google_does_ml/labs/automl-tabular-classification.ipynb (6 lines):
- line 604: "job = # TODO 1 -- Your code goes here(\n",
- line 625: "model = # TODO 2a -- Your code goes here(\n",
- line 629: "  # TODO 2b -- Your code goes here \n",
- line 685: "endpoint = # TODO 3 -- Your code goes here(\n",
- line 734: "prediction = # TODO 4 -- Your code goes here(\n",
- line 803: "# TODO 5 -- Your code goes here"
courses/developingapps/python/kubernetesengine/start/backend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/python/kubernetesengine/end/backend/start/backend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/v1.2/python/kubernetesengine/bonus/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
quests/serverlessml/03_tfdata/labs/input_pipeline.ipynb (6 lines):
- line 141: "# TODO 1: Use tf.data to read CSV files\n",
- line 145: "# TODO 2: Load the training data into memory\n",
- line 186: "  # TODO 3: Prune the data by removing column named 'key'\n",
- line 220: "  # TODO 4: Use tf.data to map features and labels\n",
- line 225: "# TODO 5: Experiment by adjusting batch size\n",
- line 254: "  # TODO 6: Add dataset.shuffle 1000 to our dataset and have it repeat\n",
courses/developingapps/python/kubernetesengine/end/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/end_to_end_ml/labs/sample_babyweight.ipynb (6 lines):
- line 126: "# TODO 1\n",
- line 127: "# TODO -- Your code here.\n",
- line 1795: "# TODO 2\n",
- line 1796: "# TODO -- Your code here.\n",
- line 2077: "  # TODO 3\n",
- line 2078: "  # TODO -- Your code here.\n",
courses/machine_learning/deepdive/10_recommend/labs/content_based_preproc.ipynb (6 lines):
- line 117: "**Hint**: For the TODO below, modify the query above changing 'content_id' to the necessary field and changing index=10 \n",
- line 130: "TODO: Modify the query above to instead create a list of all categories in the dataset.\n",
- line 133: "categories_list = #TODO: Modify the query above to create the list of categories\n",
- line 176: "In this section, we will create the train/test split of our data for training our model. Read through the query and complete the TODO at the bottom. \n",
- line 222: "  TODO: Use FARM_FINGERPRINT on the concatenated visitor_id and content_id to create a training set of approximately 90% of the data\n",
- line 279: "  #TODO: Modify the FARM_FINGERPRINT you used in the previous cell to create a test set of approximately 10% of the data\n",
courses/machine_learning/deepdive2/recommendation_systems/labs/als_bqml_hybrid.ipynb (6 lines):
- line 316: "**TODO 1**: Combine the above two queries to get the user factors and product factor for each rating."
- line 336: "  # TODO: Place the user features query here\n",
- line 340: "  # TODO: Place the product features query here\n",
- line 451: "**TODO 2**: Create a function that returns named columns from a size 16 product factor array."
- line 476: "  # TODO: Finish building this struct\n",
- line 491: "  # TODO: Finish building this struct\n",
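The input_pipeline.ipynb TODOs above compose into a single tf.data pipeline. A sketch assuming the taxi CSV schema that quest uses (column names are assumptions):

    import tensorflow as tf

    CSV_COLUMNS = ["fare_amount", "pickup_datetime",
                   "pickup_longitude", "pickup_latitude",
                   "dropoff_longitude", "dropoff_latitude",
                   "passenger_count", "key"]
    LABEL_COLUMN = "fare_amount"

    def features_and_labels(row):
        label = row.pop(LABEL_COLUMN)
        row.pop("key")  # prune the unused 'key' column
        return row, label

    def create_dataset(pattern, batch_size=32, mode="eval"):
        # Use tf.data to read CSV files matching the pattern
        dataset = tf.data.experimental.make_csv_dataset(
            pattern, batch_size, column_names=CSV_COLUMNS, num_epochs=1)
        # Map raw rows to (features, label) pairs
        dataset = dataset.map(features_and_labels)
        if mode == "train":
            # Shuffle (buffer of 1000) and repeat for multi-epoch training
            dataset = dataset.shuffle(1000).repeat()
        return dataset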
courses/developingapps/python/appengine/start/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/launching_into_ml/labs/first_model.ipynb (6 lines):
- line 156: "# TODO 1: Choose the correct ML model_type for forecasting:\n",
- line 194: "# TODO 2: Specify the command to evaluate your newly trained model\n",
- line 255: "  # Placeholder for additional filters as part of TODO 3 later\n",
- line 265: "TODO 3: Now apply the below filters to the previous query inside the WHERE clause. Does the performance improve? Why or why not?\n",
- line 393: "# TODO 4a: Choose correct BigQuery ML model type for DNN and label field\n",
- line 441: "# TODO 4b: What is the command to see how well a \n",
courses/developingapps/v1.3/python/appengine/start/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/python/pubsub-languageapi-spanner/bonus/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/feature_engineering/solutions/6_gapic_feature_store.ipynb (6 lines):
- line 1258: "# TODO 1a\n",
- line 1354: "# TODO 1b\n",
- line 1509: "# TODO 2a\n",
- line 1549: "# TODO 2b\n",
- line 1735: "  # TODO 3a\n",
- line 1750: "  # TODO 3b\n",
quests/serverlessml/02_bqml/labs/first_model.ipynb (6 lines):
- line 141: "# TODO 1: Choose the correct ML model_type for forecasting: Linear Regression (linear_reg) or Logistic Regression (logistic_reg)\n",
- line 177: "# TODO 2: Specify the command to evaluate your newly trained model\n",
- line 231: "  # placeholder for additional filters as part of TODO 3 later\n",
- line 241: "TODO 3: Now apply the below filters to the previous query inside the WHERE clause. Does the performance improve? Why or why not?\n",
- line 361: "# TODO 4a: Choose the appropriate BigQuery ML model type for DNN and the correct label field\n",
- line 405: "# TODO 4b: What is the command to see how well a \n",
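The first_model.ipynb TODOs are SQL rather than Python; for TODO 1 the key option is model_type='linear_reg'. A sketch run through the BigQuery Python client rather than the notebook's cell magic; the dataset, table, and column names below are placeholders:

    from google.cloud import bigquery

    client = bigquery.Client()
    # TODO 1 equivalent: a linear regression model to predict fare amount
    client.query("""
        CREATE OR REPLACE MODEL serverlessml.model1_rawdata
        OPTIONS(model_type='linear_reg', input_label_cols=['fare_amount']) AS
        SELECT fare_amount, pickuplon, pickuplat, dropofflon, dropofflat, passengers
        FROM serverlessml.taxi_training_data
    """).result()
    # TODO 2 equivalent:
    # SELECT * FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata)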
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/what_if_mortgage.ipynb (6 lines):
- line 610: "# TODO 1\n",
- line 618: "# TODO 1a\n",
- line 670: "# TODO 1b\n",
- line 691: "#### TODO 2\n",
- line 697: "#### TODO 2a\n",
- line 702: "#### TODO 2b\n",
courses/developingapps/python/pubsub-languageapi-spanner/start/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/developingapps/python/kubernetesengine/start/frontend/quiz/gcp/languageapi.py (6 lines):
- line 15: # TODO: Import the language module
- line 21: # TODO: Import enums and types
- line 29: # TODO: Create the Language API client
- line 43: # TODO: Create a Document object
- line 49: # TODO: Analyze the sentiment
- line 56: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/building_production_ml_systems/solutions/3_kubeflow_pipelines.ipynb (5 lines):
- line 135: "**TODO 1**"
- line 180: "**TODO 2**"
- line 316: "**TODO 3**"
- line 427: "# TODO 3\n",
- line 499: "# TODO 4\n",
courses/machine_learning/deepdive/05_artandscience/labs/a_handtuning.ipynb (5 lines):
- line 203: "  estimator = #TODO: Use LinearRegressor estimator\n",
- line 212: "  input_fn = ,#TODO: use tf.compat.v1.estimator.inputs.pandas_input_fn \n",
- line 215: "  input_fn = ,#TODO: use tf.compat.v1.estimator.inputs.pandas_input_fn\n",
- line 285: "  myopt = #TODO: use tf.compat.v1.train.FtrlOptimizer and set learning rate\n",
- line 298: "  input_fn = ,#TODO: make sure to specify batch_size\n",
courses/machine_learning/deepdive2/art_and_science_of_ml/solutions/sdk-custom-image-classification-online.ipynb (5 lines):
- line 733: "# TODO 1: Create a custom job for training a model\n",
- line 963: "# TODO 2: Start the training\n",
- line 1049: "# TODO 3: Deploy the Model\n",
- line 1191: "# TODO 4: Make a prediction\n",
- line 1225: "# TODO 5\n",
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/comparing_local_trained_models.ipynb (5 lines):
- line 56: "Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/comparing_local_trained_models.ipynb)."
- line 967: "  # TODO 1: Get the model\n",
- line 974: "  # TODO 2: Train the model\n",
- line 1354: "# TODO 3: Evaluate model\n",
- line 1562: "# TODO 4: Get experiment\n",
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/end/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/launching_into_ml/solutions/rapid_prototyping_bqml_automl.ipynb (5 lines):
- line 682: "  # TODO 1: Construct a BigQuery client object.\n",
- line 1053: "  # TODO 2: List the model evaluations.\n",
- line 1156: "  # TODO 3: Change the condition if higher is better.\n",
- line 1256: "  # TODO 4: Make a simple prediction\n",
- line 1515: "# TODO 5: Run the pipeline job\n",
courses/machine_learning/deepdive2/feature_engineering/solutions/5_tftransform_taxifare.ipynb (5 lines):
- line 759: "  # TODO 1\n",
- line 763: "  # TODO 2\n",
- line 775: "  # TODO 3\n",
- line 875: "  # TODO 4\n",
- line 888: "  # TODO 5\n",
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/end/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/machine_learning/deepdive/02_tensorflow/labs/b_tfstart_graph.ipynb (5 lines):
- line 205: "  w0 = # TODO: Your code goes here\n",
- line 206: "  w1 = # TODO: Your code goes here\n",
- line 242: "LEARNING_RATE = # TODO: Your code goes here\n",
- line 243: "optimizer = # TODO: Your code goes here"
- line 280: "  # TODO: Your code goes here\n",
courses/developingapps/v1.3/python/kubernetesengine/end/backend/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/feature_engineering/labs/5_tftransform_taxifare.ipynb (5 lines):
- line 304: "  # TODO 1: convert day of week from string->int with tft.string_to_int\n",
- line 308: "  # TODO 2: scale pickup/dropoff lat/lon between 0 and 1 with tft.scale_to_0_1\n",
- line 318: "  # TODO 3: Scale our engineered features latdiff and londiff between 0 and 1\n",
- line 407: "  # TODO 4: Analyze and transform our training data\n",
- line 424: "  # TODO 5: Read eval data from BigQuery using beam.io.BigQuerySource\n",
courses/machine_learning/deepdive2/feature_engineering/solutions/sdk-feature-store.ipynb (5 lines):
- line 341: "# TODO 1a\n",
- line 661: "# TODO 1b\n",
- line 1052: "# TODO 2\n",
- line 1219: "# TODO 3\n",
- line 1581: "# TODO 4\n",
courses/machine_learning/deepdive/08_image_keras/labs/flowersmodel/model.py (5 lines):
- line 104: image = # TODO: decode contents into JPEG
- line 105: image = # TODO: convert JPEG tensor to floats between 0 and 1
- line 109: # TODO: add image augmentation functions
- line 138: dataset = #TODO: map read_and_preprocess_with_augment
- line 140: dataset = #TODO: map read_and_preprocess
courses/machine_learning/deepdive2/image_classification/labs/4_tpu_training.ipynb (5 lines):
- line 112: "  # TODO: define a TPU strategy\n",
- line 113: "  resolver = # TODO: Your code goes here\n",
- line 116: "  strategy = # TODO: Your code goes here\n",
- line 239: "  # TODO: Your code goes here \\\n",
- line 240: "  # TODO: Your code goes here \\\n",
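For the 4_tpu_training.ipynb TODOs, the resolver/strategy pair looks roughly like this; it's a sketch, and the TPU address would come from the lab's own command-line flag:

    import tensorflow as tf

    def create_tpu_strategy(tpu_address):
        # Resolve and connect to the TPU cluster, then initialize the TPU system
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu_address)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        # Define a TPU strategy; build and compile the model under strategy.scope()
        return tf.distribute.TPUStrategy(resolver)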
courses/machine_learning/deepdive/09_sequence/labs/sinemodel/model.py (5 lines):
- line 41: #TODO: finish linear model
- line 46: #TODO: finish DNN model
- line 51: #TODO: finish CNN model
- line 57: #TODO: finish rnn model
- line 63: #TODO: finish 2-layer rnn model
courses/machine_learning/asl/02_tensorflow/labs/b_tfstart_graph.ipynb (5 lines):
- line 205: "  w0 = # TODO: Your code goes here\n",
- line 206: "  w1 = # TODO: Your code goes here\n",
- line 242: "LEARNING_RATE = # TODO: Your code goes here\n",
- line 243: "optimizer = # TODO: Your code goes here"
- line 280: "  # TODO: Your code goes here\n",
courses/machine_learning/deepdive2/production_ml/labs/parameter_server_training.ipynb (5 lines):
- line 482: "# TODO: Your code goes here"
- line 906: "  train_dataset = # TODO: Your code goes here"
- line 938: "  # TODO: Your code goes here\n",
- line 1108: "  # TODO: Your code goes here"
- line 1207: "# TODO: Your code goes here"
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/start/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/recommendation_systems/solutions/basic_retrieval.ipynb (5 lines):
- line 216: "# TODO 1 - Your code is here.\n",
- line 606: "# TODO 2 - Your code is here.\n",
- line 1189: "# TODO 3a - Your code is here.\n",
- line 1294: "# TODO 3b - Your code is here.\n",
- line 1440: "# TODO 4 - Your code is here.\n",
quests/serverlessml/06_feateng_keras/labs/taxifare_fc.ipynb (5 lines):
- line 209: "  # TODO 1: Recall from earlier how you used tf.data to read the CSV files (no changes needed):\n",
- line 267: "  # TODO 2: Create two new features called londiff and latdiff\n",
- line 385: "  # TODO 3: Specify the dense feature layers for the DNN as inputs\n",
- line 541: "# TODO 4: Make example predictions. Experiment with different passenger_counts and pickup times and re-run.\n",
- line 580: "# TODO 5: Export the model in preparation for serving later\n",
courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/end/server/api/index.js (5 lines):
- line 20: // TODO: Load the ../gcp/pubsub module
- line 53: // TODO: Publish the message into Cloud Pub/Sub
- line 55: // TODO: Move the statement that returns a message to
- line 60: // TODO: Add a catch
- line 62: // TODO: There was an error, invoke the next middleware
courses/developingapps/v1.3/java/cloudstorage/start/src/main/java/com/google/training/appdev/services/gcp/cloudstorage/ImageService.java (5 lines):
- line 18: // TODO: Write a star import for Cloud Storage
- line 35: // TODO: Create the storage client
- line 46: // TODO: Get the name of the Cloud Storage bucket
- line 66: // TODO: Create a new Cloud Storage object
- line 76: // TODO: Cloud Storage public URLs are in the form:
courses/machine_learning/deepdive2/production_ml/labs/comparing_local_trained_models.ipynb (5 lines):
- line 56: "Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/comparing_local_trained_models.ipynb)."
- line 967: "  # TODO 1: Get the model\n",
- line 974: "  # TODO 2: Train the model\n",
- line 1354: "# TODO 3: Evaluate model\n",
- line 1562: "# TODO 4: Get experiment\n",
courses/machine_learning/deepdive2/launching_into_ml/labs/rapid_prototyping_bqml_automl.ipynb (5 lines):
- line 683: "  client = # TODO 1: Your code goes here\n",
- line 1054: "  model_evaluation = # TODO 2: Your code goes here\n",
- line 1157: "  # TODO 3: Your code goes here\n",
- line 1256: "  prediction = # TODO 4: Your code goes here\n",
- line 1514: "pipeline_job = # TODO 5: Your code goes here(\n",
courses/machine_learning/deepdive2/how_google_does_ml/inclusive_ml/labs/inclusive_ml.ipynb (5 lines):
- line 220: "# TODO 1\n",
- line 272: "**TODO 2:** Your first action will be to examine the data and its distribution along dimensions that are relevant to loan scoring. The initial presentation in the tool shows all datapoints. Blue dots are those individuals predicted as having incomes above 50k. Red dots are those predicted as having incomes below 50k."
- line 314: "**TODO 2:** Next navigate to the Features tab, where you can see the exact distribution of values for every feature in the dataset. If you type \"sex\" into the filter box, you will see that of the 2,000 test datapoints, 670 are from Women and 1,330 are from Men (as mentioned earlier, the value \"1\" was assigned to Females and \"2\" to Males). The dataset reflects an imbalance between Females and Males, with nearly double the number of cases that are Male. Women seem under-represented in this dataset."
- line 328: "**TODO 3:** On the \"Performance + Fairness\" tab, you can set an input feature (or set of features) by which to slice the data. This will allow you to evaluate the fairness of specific groups. Income Prediction (corresponding to over or under 50k) has already been selected as the \"Ground Truth Feature\". On the \"Slice by\" selector, scroll to find and choose \"Sex\"."
- line 370: "**TODO 4:** On the \"Performance + Fairness\" tab, select \"Demographic Parity\" to see the results."
quests/dataflow_python/7_Advanced_Streaming_Analytics/lab/streaming_minute_traffic_pipeline.py (5 lines):
- line 36: yield #TODO: TaggedOutput with tag 'parsed_row' and output CommonLog
- line 39: yield #TODO: TaggedOutput with tag 'unparsed_row' and output CommonLog
- line 113: #TODO: Set trigger and accumulation mode)
- line 114: | 'WriteUnparsedToGCS' >># TODO: Use fileio.WriteToFiles to write out to GCS
- line 119: # TODO: Set allowed lateness, trigger, and accumulation mode
courses/machine_learning/deepdive2/recommendation_systems/labs/featurization.ipynb (5 lines):
- line 2023: "movie_title_embedding = # TODO: Your code goes here\n",
- line 2207: "timestamp_normalization = # TODO: Your code goes here\n",
- line 2337: "title_text = # TODO: Your code goes here\n",
- line 2501: "user_model = # TODO: Your code goes here\n",
- line 2583: "movie_model = # TODO: Your code goes here\n",
courses/machine_learning/deepdive2/launching_into_ml/labs/TrainingWithXGBoostInCMLE.ipynb (5 lines):
- line 179: "# TODO: REPLACE 'BUCKET_CREATED_ABOVE' with your GCS BUCKET_ID\n",
- line 196: "# TODO 1a: Your code here\n",
- line 262: "#TODO 1b: Your code here\n",
- line 299: "# TODO 2: Your code here\n",
- line 378: "# TODO 3: Your code here\n"
courses/machine_learning/deepdive2/recommendation_systems/labs/basic_retrieval.ipynb (5 lines):
- line 216: "# TODO 1 - Your code goes below here.\n",
- line 604: "# TODO 2 - Your code goes below here.\n",
- line 1184: "# TODO 3a - Your code goes below here.\n",
- line 1288: "# TODO 3b - Your code goes below here.\n",
- line 1434: "# TODO 4 - Your code goes here.\n",
courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/end/server/api/index.js (5 lines):
- line 20: // TODO: Load the ../gcp/pubsub module
- line 53: // TODO: Publish the message into Cloud Pub/Sub
- line 55: // TODO: Move the statement that returns a message to
- line 60: // TODO: Add a catch
- line 62: // TODO: There was an error, invoke the next middleware
courses/machine_learning/deepdive2/building_production_ml_systems/labs/1_training_at_scale_vertex.ipynb (5 lines):
- line 544: "# TODO 1a: Your code here\n",
- line 550: "# TODO 1b: Your code here\n",
- line 828: "# TODO 2: Your code here"
- line 879: "# TODO 3: Your code here"
- line 913: "TODO: To submit to the Cloud we use [`gcloud ai custom-jobs create`](https://cloud.google.com/sdk/gcloud/reference/ai/custom-jobs/create) and simply specify some additional parameters for Vertex AI Training Service:\n",
quests/dataflow_scala/3_Batch_Analytics/labs/src/main/scala/scio/mypackage/pipeline/BatchUserTrafficPipeline.scala (5 lines):
- line 19: //TODO: Add imports
- line 28: //TODO: Add CommonLog Class
- line 30: // TODO: Add A DoFn accepting Json and outputting CommonLog with Beam Schema
- line 32: //TODO: Add UserTraffic Class
- line 72: // Step4: TODO: Aggregate traffic by user using combine functionality
courses/developingapps/v1.2/nodejs/pubsub-languageapi-spanner/start/server/api/index.js (5 lines):
- line 21: // TODO: Load the ../gcp/pubsub module
- line 58: // TODO: Publish the message into Cloud Pub/Sub
- line 61: // TODO: Move the statement that returns a message to
- line 68: // TODO: Add a catch
- line 70: // TODO: There was an error, invoke the next middleware
courses/machine_learning/deepdive2/launching_into_ml/solutions/TrainingWithXGBoostInCMLE.ipynb (5 lines):
- line 179: "# TODO: REPLACE 'BUCKET_CREATED_ABOVE' with your GCS BUCKET_ID\n",
- line 196: "# TODO 1a: Download the data from Google Cloud Storage\n",
- line 264: "# TODO 1b: Export and save the model to GCS\n",
- line 307: "# TODO 2: Prepare a package\n",
- line 386: "# TODO 3: Submit the training job\n",
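The streaming_minute_traffic_pipeline.py entry at the top of this group hinges on Beam's multi-output DoFn. A sketch of the two tagged yields; CommonLog construction is elided and json.loads parsing is an assumption:

    import json
    import apache_beam as beam
    from apache_beam.pvalue import TaggedOutput

    class ConvertToCommonLogFn(beam.DoFn):
        def process(self, element):
            try:
                row = json.loads(element.decode("utf-8"))
                # Well-formed records go to the 'parsed_row' output
                yield TaggedOutput("parsed_row", row)
            except Exception:
                # Malformed records go to 'unparsed_row' for later inspection
                yield TaggedOutput("unparsed_row", element.decode("utf-8"))

    # Usage:
    # rows = (lines | "ParseJson" >> beam.ParDo(ConvertToCommonLogFn())
    #                   .with_outputs("parsed_row", "unparsed_row"))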
courses/machine_learning/deepdive/06_structured/babyweight/trainer/model.py (5 lines):
- line 117: ## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
- line 121: ## TODO 2b: change the dnn_hidden_units to NNSIZE
- line 134: ## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
- line 135: ## TODO 2d: and set max_steps to TRAIN_STEPS
- line 142: ## TODO 2e: Lastly, set steps equal to EVAL_STEPS
courses/machine_learning/deepdive2/art_and_science_of_ml/solutions/distributed_training.ipynb (5 lines):
- line 96: "# TODO 1\n",
- line 243: "# TODO 2\n",
- line 427: "# TODO 3a\n",
- line 516: "# TODO 3b\n",
- line 609: "# TODO 3c\n",
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/solutions/sdk_custom_tabular_regression_online_explain.ipynb (5 lines):
- line 977: "# TODO 1: Define your custom training job\n",
- line 1282: "# TODO 2: Perform the model evaluation\n",
- line 1565: "# TODO 3: Upload the model\n",
- line 1635: "# TODO 4a and 4b: Deploy the model\n",
- line 1885: "# TODO 5: Make the prediction with explanation\n",
courses/machine_learning/deepdive2/text_classification/labs/word2vec.ipynb (5 lines):
- line 890: "  # TODO 1a -- your code goes here\n",
- line 907: "  # TODO 1b -- your code goes here\n",
- line 1081: "# TODO 2a -- your code goes here"
- line 3571: "# TODO 3a -- your code goes here"
- line 5074: "# TODO 4a -- your code goes here"
courses/machine_learning/deepdive2/feature_engineering/labs/sdk-feature-store.ipynb (5 lines):
- line 341: "fs = # TODO 1a: Your code goes here(\n",
- line 660: "movie_features = # TODO 1b: Your code goes here(\n",
- line 1050: "# TODO 2: Your code goes here(\n",
- line 1216: "# TODO 3: Your code goes here\n"
- line 1577: "# TODO 4: Your code goes here(\n",
courses/developingapps/v1.3/python/kubernetesengine/start/backend/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/text_classification/solutions/custom_tf_hub_word_embedding.ipynb (5 lines):
- line 127: "## Step 1: Download the `text2hub` pipeline from AI Hub (TODO 1)"
- line 176: "## Step 2: Upload the pipeline to the Kubeflow cluster (TODO 1)"
- line 197: "## Step 3: Create a pipeline run (TODO 1)"
- line 218: "## Step 4: Enter the run parameters (TODO 2)"
- line 296: "## Step 5: Inspect the run artifacts (TODO 3)"
courses/developingapps/v1.2/python/kubernetesengine/start/backend/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/developingapps/v1.2/python/kubernetesengine/end/backend/quiz/gcp/languageapi.py (5 lines):
- line 15: # TODO: Import the language module
- line 22: # TODO: Create the Language API client
- line 36: # TODO: Create a Document object
- line 42: # TODO: Analyze the sentiment
- line 49: # TODO: Return the sentiment score
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/sdk_custom_xgboost.ipynb (5 lines):
- line 46: "Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/sdk_custom_xgboost.ipynb)."
- line 760: "job = # TODO: Your code goes here\n",
- line 874: "model = # TODO: Your code goes here\n",
- line 1000: "batch_predict_job = # TODO: Your code goes here (\n",
- line 1135: "endpoint = # TODO: Your code goes here"
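The sdk_custom_xgboost.ipynb gaps all come from the google-cloud-aiplatform SDK. A sketch with placeholder display names, bucket URIs, and container images (the exact image tags are assumptions, not the lab's pinned versions):

    from google.cloud import aiplatform

    aiplatform.init(project="your-project", staging_bucket="gs://your-bucket")

    # Create and run a custom training job from a local script
    job = aiplatform.CustomTrainingJob(
        display_name="xgboost-train",
        script_path="train.py",
        container_uri="us-docker.pkg.dev/vertex-ai/training/xgboost-cpu.1-1:latest",
    )
    job.run(replica_count=1, machine_type="n1-standard-4")

    # Upload the exported model artifacts, then deploy for online prediction
    model = aiplatform.Model.upload(
        display_name="xgboost-model",
        artifact_uri="gs://your-bucket/model/",
        serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-1:latest",
    )
    endpoint = model.deploy(machine_type="n1-standard-4")
    # model.batch_predict(...) covers the batch_predict_job TODO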
quests/getting_started_apache_beam/beam_ml_toxicity_in_gaming/exercises/part2.py (5 lines):
- line 106: # TODO: Follow Step 1: Create the model handler
- line 112: # TODO: Follow Step 2: Submit the input into the model for a result
- line 115: # TODO: Follow Step 3: Join your results together
- line 124: # TODO: Follow Step 4: Transform your joined results into a string
- line 125: # TODO: Follow Step 6: Join your results together
courses/machine_learning/deepdive2/time_series_prediction/labs/optional_1_data_exploration.ipynb (5 lines):
- line 252: "  # TODO: query a specific stock\n",
- line 306: "**TODO 2**: Compare individual stocks to the S&P 500."
- line 327: "# TODO: visualize S&P 500 price"
- line 408: "  --# TODO: compute a year lag on avg_close\n",
- line 651: "# TODO: can you visualize when the major stock splits occurred?"
courses/machine_learning/deepdive/09_sequence_keras/labs/sinemodel/model.py (5 lines):
- line 42: # TODO: Finish linear model
- line 48: # TODO: Finish DNN model
- line 55: # TODO: Finish CNN model
- line 64: # TODO: Finish rnn model
- line 72: # TODO: Finish 2-layer rnn model
courses/machine_learning/deepdive2/how_google_does_ml/bigquery/solution/analyze_with_bigquery_solution.ipynb (5 lines):
- line 46: "PROJECT = \"\" #TODO Replace with your project id\n",
- line 74: "TODO 2"
- line 468: "TODO 1"
- line 958: "TODO 3"
- line 1739: "## Write basic SQL against the eCommerce data (TODO 4)\n",
courses/machine_learning/deepdive2/image_classification/labs/5_fashion_mnist_class.ipynb (5 lines):
- line 345: "# TODO 1\n"
- line 453: "# TODO 2\n"
- line 496: "# TODO 2 \n"
- line 572: "# TODO 3"
- line 789: "# TODO 3\n",
courses/machine_learning/deepdive2/art_and_science_of_ml/labs/sdk-custom-image-classification-online.ipynb (5 lines):
- line 733: "# TODO 1\n",
- line 948: "# TODO 2\n",
- line 1018: "# TODO 3\n",
- line 1134: "# TODO 4\n",
- line 1161: "# TODO 5\n",
courses/ai-for-time-series/notebooks/02-model.ipynb (5 lines):
- line 282: "## TODO 1: Remove outliers\n",
- line 326: "# TODO: Update the threshold below to remove the outliers\n",
- line 621: "#### TODO 2: Update the LSTM architecture\n",
- line 679: "#### TODO 3: Update the CNN architecture\n",
- line 692: "# TODO: Try adjusting the # of filters (pattern types) and kernel size (size of the sliding window)\n",
courses/machine_learning/deepdive2/text_classification/solutions/word2vec.ipynb (5 lines):
- line 889: "  # TODO 1a\n",
- line 905: "  # TODO 1b\n",
- line 1089: "# TODO 2a\n",
- line 3580: "# TODO 3a\n",
- line 5088: "# TODO 4a\n",
courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/start/server/api/index.js (5 lines):
- line 21: // TODO: Load the ../gcp/pubsub module
- line 58: // TODO: Publish the message into Cloud Pub/Sub
- line 61: // TODO: Move the statement that returns a message to
- line 68: // TODO: Add a catch
- line 70: // TODO: There was an error, invoke the next middleware
quests/dataflow/1_Basic_ETL/labs/src/main/java/com/mypackage/pipeline/MyPipeline.java (5 lines):
- line 19: //TODO: Add imports
- line 51: //TODO: Add CommonLog Class
- line 53: //TODO: Add JsonToCommonLog DoFn
- line 71: //TODO: Add static input and output strings
- line 81: //TODO: Add pipeline.apply() and other steps
courses/machine_learning/deepdive2/how_google_does_ml/bigquery/solution/analyze_with_bigquery_solution.ipynb (5 lines): - line 46: "PROJECT = \"\" #TODO Replace with your project id\n", - line 74: "TODO 2" - line 468: "TODO 1" - line 958: "TODO 3" - line 1739: "## Write basic SQL against the eCommerce data (TODO 4)\n",
courses/machine_learning/deepdive2/image_classification/labs/5_fashion_mnist_class.ipynb (5 lines): - line 345: "# TODO 1\n" - line 453: "# TODO 2\n" - line 496: "# TODO 2 \n" - line 572: "# TODO 3" - line 789: "# TODO 3\n",
courses/machine_learning/deepdive2/art_and_science_of_ml/labs/sdk-custom-image-classification-online.ipynb (5 lines): - line 733: "# TODO 1\n", - line 948: "# TODO 2\n", - line 1018: "# TODO 3\n", - line 1134: "# TODO 4\n", - line 1161: "# TODO 5\n",
courses/ai-for-time-series/notebooks/02-model.ipynb (5 lines): - line 282: "## TODO 1: Remove outliers\n", - line 326: "# TODO: Update the threshold below to remove the outliers\n", - line 621: "#### TODO 2: Update the LSTM architecture\n", - line 679: "#### TODO 3: Update the CNN architecture\n", - line 692: "# TODO: Try adjusting the # of filters (pattern types) and kernel size (size of the sliding window)\n",
courses/machine_learning/deepdive2/text_classification/solutions/word2vec.ipynb (5 lines): - line 889: " # TODO 1a\n", - line 905: " # TODO 1b\n", - line 1089: "# TODO 2a\n", - line 3580: "# TODO 3a\n", - line 5088: "# TODO 4a\n",
courses/developingapps/v1.3/nodejs/pubsub-languageapi-spanner/start/server/api/index.js (5 lines): - line 21: // TODO: Load the ../gcp/pubsub module - line 58: // TODO: Publish the message into Cloud Pub/Sub - line 61: // TODO: Move the statement that returns a message to - line 68: // TODO: Add a catch - line 70: // TODO: There was an error, invoke the next middleware
quests/dataflow/1_Basic_ETL/labs/src/main/java/com/mypackage/pipeline/MyPipeline.java (5 lines): - line 19: //TODO: Add imports - line 51: //TODO: Add CommonLog Class - line 53: //TODO: Add JsonToCommonLog DoFn - line 71: //TODO: Add static input and output strings - line 81: //TODO: Add pipeline.apply() and other steps
courses/developingapps/java/cloudstorage/start/src/main/java/com/google/training/appdev/services/gcp/cloudstorage/ImageService.java (5 lines): - line 18: // TODO: Write a star import for Cloud Storage - line 35: // TODO: Create the storage client - line 46: // TODO: Get the name of the Cloud Storage bucket - line 66: // TODO: Create a new Cloud Storage object - line 76: // TODO: Cloud Storage public URLs are in the form:
quests/endtoendml/solutions/babyweight/trainer/model.py (5 lines): - line 117: ## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL - line 121: ## TODO 2b: change the dnn_hidden_units to NNSIZE - line 134: ## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE - line 135: ## TODO 2d: and set max_steps to TRAIN_STEPS - line 142: ## TODO 2e: Lastly, set steps equal to EVAL_STEPS
courses/machine_learning/deepdive/10_recommend/labs/hybrid_recommendations/hybrid_recommendations.ipynb (5 lines): - line 324: " # TODO: Create neural network input layer using our feature columns defined above\n", - line 326: " # TODO: Create hidden layers by looping through hidden unit list\n", - line 328: " # TODO: Compute logits (1 per class) using the output of our last hidden layer\n", - line 330: " # TODO: Find the predicted class indices based on the highest logit (which will result in the highest probability)\n", - line 370: " # TODO: Compute loss using the correct type of softmax cross entropy since this is classification and our labels (content id indices) and probabilities are mutually exclusive\n",
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/start/quiz/gcp/languageapi.py (5 lines): - line 15: # TODO: Import the language module - line 22: # TODO: Create the Language API client - line 36: # TODO: Create a Document object - line 42: # TODO: Analyze the sentiment - line 49: # TODO: Return the sentiment score
quests/dataflow_python/6_SQL_Streaming_Analytics/lab/streaming_minute_traffic_SQL_pipeline.py (5 lines): - line 44: | 'ParseJson' >> # TODO: Apply parse_json function - line 45: | 'GetEventTimestamp' >> # TODO: Apply GetEventTimestampFn DoFn. - line 95: query = # TODO: Write SQL Query - line 100: (p | 'ReadFromPubSub' >> # TODO: Read from Pub/Sub Topic - line 101: | 'ParseAndGetEventTimestamp' >> # TODO: Apply ParseAndGetEventTimestamp custom PTransform with output type CommonLog
courses/developingapps/v1.2/java/cloudstorage/start/src/main/java/com/google/training/appdev/services/gcp/cloudstorage/ImageService.java (5 lines): - line 18: // TODO: Write a star import for Cloud Storage - line 35: // TODO: Create the storage client - line 46: // TODO: Get the name of the Cloud Storage bucket - line 66: // TODO: Create a new Cloud Storage object - line 76: // TODO: Cloud Storage public URLs are in the form:
quests/dataflow_python/1_Basic_ETL/lab/my_pipeline.py (5 lines): - line 1: # TODO: Add imports - line 5: # TODO: Add parse_json function - line 30: # TODO: Add static input and output strings - line 33: table_schema = # TODO: Add table schema - line 47: # TODO: Add transformation steps to pipeline
courses/machine_learning/deepdive2/production_ml/solutions/distributed_training.ipynb (5 lines): - line 96: "# TODO 1\n", - line 243: "# TODO 2\n", - line 420: "# TODO 3a\n", - line 509: "# TODO 3b\n", - line 602: "# TODO 3c\n",
courses/developingapps/nodejs/pubsub-languageapi-spanner/start/server/api/index.js (5 lines): - line 21: // TODO: Load the ../gcp/pubsub module - line 58: // TODO: Publish the message into Cloud Pub/Sub - line 61: // TODO: Move the statement that returns a message to - line 68: // TODO: Add a catch - line 70: // TODO: There was an error, invoke the next middleware
courses/machine_learning/deepdive2/feature_engineering/solutions/sdk-feature-store-pandas.ipynb (5 lines): - line 324: "# TODO 1\n", - line 352: "# TODO 2\n", - line 666: "# TODO 3\n", - line 714: "# TODO 4\n", - line 874: "# TODO 5\n",
courses/machine_learning/deepdive/08_image/labs/flowersmodel/model.py (5 lines): - line 101: image = #TODO: decode contents into JPEG - line 102: image = #TODO: convert JPEG tensor to floats between 0 and 1 - line 106: #TODO: Add image augmentation functions - line 134: dataset = #TODO: map read_and_preprocess_with_augment - line 136: dataset = #TODO: map read_and_preprocess
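The flowersmodel/model.py image TODOs (decode the JPEG contents, convert to floats in [0, 1], add augmentation) map onto standard tf.image ops. A hedged sketch, with HEIGHT and WIDTH assumed:

```python
# Sketch of the flowersmodel/model.py image TODOs. HEIGHT/WIDTH are assumptions.
import tensorflow as tf

HEIGHT, WIDTH = 299, 299


def read_and_preprocess(image_bytes, augment=False):
    image = tf.io.decode_jpeg(image_bytes, channels=3)       # decode contents into JPEG
    image = tf.image.convert_image_dtype(image, tf.float32)  # floats between 0 and 1
    image = tf.image.resize(image, [HEIGHT, WIDTH])
    if augment:
        # image augmentation functions (choice of ops is illustrative)
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_brightness(image, max_delta=0.1)
    return image
```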
quests/dataflow_python/2_Branching_Pipelines/lab/my_pipeline.py (5 lines): - line 16: # TODO: Define drop_fields function. - line 27: # TODO: Add command-line arguments for input path, output path and table name - line 38: # TODO: Set variables equal to input_path, output_path and table_name from - line 91: # TODO: Refactor pipeline to branch, with one branch writing directly to GCS - line 96: # TODO: Apply filter to elements
courses/developingapps/v1.2/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/languageapi/LanguageService.java (4 lines): - line 31: // TODO: Create the LanguageServiceClient object - line 34: // TODO: Create a new Document object using the builder - line 42: // TODO: Use the client to analyze the sentiment of the feedback - line 48: // TODO: Return the sentiment score instead of 0.0f;
courses/developingapps/v1.3/python/firebase/end/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/machine_learning/deepdive2/building_production_ml_systems/solutions/1_training_at_scale_vertex.ipynb (4 lines): - line 532: " #TODO 1a\n", - line 540: " #TODO 1b\n", - line 994: "# TODO 3\n", - line 1034: "TODO: To submit to the Cloud we use [`gcloud ai custom-jobs create`](https://cloud.google.com/sdk/gcloud/reference/ai/custom-jobs/create) and simply specify some additional parameters for Vertex AI Training Service:\n",
courses/developingapps/v1.3/python/kubernetesengine/end/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Subscriber Client - line 33: # TODO: Create a Subscription object named worker-subscription - line 46: # TODO: Subscriber to the worker-subscription,
courses/machine_learning/deepdive2/building_production_ml_systems/solutions/1_training_at_scale.ipynb (4 lines): - line 548: " #TODO 1a\n", - line 556: " #TODO 1b\n", - line 784: "#TODO 2\n", - line 844: "# TODO 3\n",
courses/developingapps/python/kubernetesengine/end/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 25: # TODO: Create a Pub/Sub Subscriber Client - line 31: # TODO: Create a Subscription object named worker-subscription - line 44: # TODO: Subscriber to the worker-subscription,
courses/machine_learning/asl/02_tensorflow/labs/a_tfstart_eager.ipynb (4 lines): - line 206: " # TODO: Your code goes here\n", - line 264: " w0 = # TODO: Your code goes here\n", - line 265: " w1 = # TODO: Your code goes here\n", - line 318: " # TODO: add new features.\n",
courses/machine_learning/deepdive/02_tensorflow/labs/a_tfstart_eager.ipynb (4 lines): - line 206: " # TODO: Your code goes here\n", - line 264: " w0 = # TODO: Your code goes here\n", - line 265: " w1 = # TODO: Your code goes here\n", - line 318: " # TODO: add new features.\n",
bootcamps/imagereco/fashionmodel/trainer/model.py (4 lines): - line 49: #TODO: apply a second convolution to the output of p1 - line 50: #TODO: apply a pooling layer with pool_size=2 and strides=2 - line 81: #TODO: create a dense layer and apply batch normalization, ensuring that activation is done once, after batch norming - line 83: #TODO: apply dropout to the batch normed dense layer
courses/developingapps/python/firebase/end/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
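The questions.py storage TODOs repeat across the cloudstorage and firebase labs: import the storage module, upload the file, return the public URL. A minimal sketch with google-cloud-storage; the bucket name and the make_public() call are assumptions (uniform bucket-level access changes the ACL story):

```python
# Sketch of the questions.py Cloud Storage TODOs. GCLOUD_BUCKET is a placeholder;
# the labs normally read it from configuration.
from google.cloud import storage  # TODO: Import the storage module

GCLOUD_BUCKET = "your-bucket-name"  # assumed


def upload_file(image_file, public=True):
    # TODO: Use the storage client to upload the file
    client = storage.Client()
    bucket = client.bucket(GCLOUD_BUCKET)
    blob = bucket.blob(image_file.filename)
    blob.upload_from_string(image_file.read(),
                            content_type=image_file.content_type)
    if public:
        blob.make_public()  # not applicable with uniform bucket-level access
    # TODO: Return the public URL
    return blob.public_url
```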
courses/machine_learning/deepdive2/feature_engineering/labs/7_get_started_with_feature_store.ipynb (4 lines): - line 657: " response = # TODO 1: Your code here\n", - line 1432: " entity_type= # TODO 2: Your code here\n", - line 1688: " request = # TODO 3: Your code here\n", - line 1815: " # TODO 4: Your code here\n",
courses/developingapps/v1.2/python/firebase/end/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/machine_learning/deepdive2/image_classification/labs/2_mnist_models.ipynb (4 lines): - line 192: "**TODO 1**: Define the Keras layers for a DNN model \n", - line 193: "**TODO 2**: Define the Keras layers for a dropout model \n", - line 194: "**TODO 3**: Define the Keras layers for a CNN model \n", - line 506: "**TODO 4**: Write a `.json` file with image data to send to an AI Platform deployed model"
courses/machine_learning/deepdive2/production_ml/solutions/comparing_local_trained_models.ipynb (4 lines): - line 967: " # TODO 1: Get the model\n", - line 974: " # TODO 2: Train the model\n", - line 1358: "# TODO 3: Evaluate model\n", - line 1566: "# TODO 4: Get experiment\n",
quests/dataflow_python/3_Batch_Analytics/lab/batch_minute_traffic_pipeline.py (4 lines): - line 34: ts = #TODO: Extract timestamp from element and convert to a datetime object - line 39: window_start = # TODO: Extract window start time and convert to string to match BQ schema. - line 93: | "WindowByMinute" >> # TODO: Window into Fixed Windows of length 1 minute - line 94: | "CountPerMinute" >> # TODO: Count number of page views per window using combiner
courses/machine_learning/deepdive2/production_ml/labs/training_example.ipynb (4 lines): - line 180: "# TODO: Your code goes here\n", - line 185: "# TODO: Your code goes here\n", - line 494: " # TODO: Your code goes here\n", - line 683: "# TODO: Your code goes here\n",
courses/machine_learning/deepdive2/art_and_science_of_ml/labs/training_models_at_scale.ipynb (4 lines): - line 659: "# TODO 1a: Your code here\n", - line 665: "# TODO 1b: Your code here\n", - line 974: "# TODO 2: Your code here" - line 1033: "# TODO 3: Your code here"
courses/machine_learning/deepdive2/building_production_ml_systems/labs/1_training_at_scale.ipynb (4 lines): - line 553: "# TODO 1a: Your code here\n", - line 559: "# TODO 1b: Your code here\n", - line 784: "# TODO 2: Your code here" - line 835: "# TODO 3: Your code here"
courses/machine_learning/deepdive2/feature_engineering/solutions/1_bqml_basic_feat_eng.ipynb (4 lines): - line 705: "#TODO 1\n", - line 786: "#TODO 2\n", - line 1116: "#TODO 3a\n", - line 1350: "#TODO 3b\n",
courses/developingapps/python/kubernetesengine/start/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 25: # TODO: Create a Pub/Sub Subscriber Client - line 31: # TODO: Create a Subscription object named worker-subscription - line 44: # TODO: Subscriber to the worker-subscription,
courses/machine_learning/deepdive2/feature_engineering/solutions/7_get_started_with_feature_store.ipynb (4 lines): - line 658: "# TODO 1: Create the Feature resources for each of the EntityType resources in your Featurestore resource using the create_feature() method.\n", - line 1436: "# TODO 2: Import the resource identifier for the EntityType resource.\n", - line 1694: "# TODO 3: Read Feature values for multiple entities\n", - line 1830: " # TODO 4: Read the 'average_rating' and 'genres' feature values of the 'movies' entity\n",
courses/machine_learning/deepdive2/recommendation_systems/solutions/basic_ranking.ipynb (4 lines): - line 305: "# TODO 1a\n", - line 385: " # TODO 2a\n", - line 393: " # TODO 2b\n", - line 834: "# TODO 3a\n",
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/solutions/comparing_local_trained_models.ipynb (4 lines): - line 967: " # TODO 1: Get the model\n", - line 974: " # TODO 2: Train the model\n", - line 1358: "# TODO 3: Evaluate model\n", - line 1566: "# TODO 4: Get experiment\n",
quests/dataflow_python/5_Streaming_Analytics/lab/streaming_minute_traffic_pipeline.py (4 lines): - line 143: parsed_msgs = (p | 'ReadFromPubSub' >> #TODO: Use ReadFromPubSub to read in messages - line 157: | "WindowByMinute" >> # TODO: Window into 1 minute long windows - line 158: | "CountPerMinute" >> # TODO: Count number of messages per window. - line 160: | 'WriteAggToBQ' >> # TODO: Write aggregated data to BigQuery table
courses/developingapps/v1.3/python/kubernetesengine/start/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Subscriber Client - line 33: # TODO: Create a Subscription object named worker-subscription - line 46: # TODO: Subscriber to the worker-subscription,
courses/developingapps/v1.2/python/kubernetesengine/end/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Subscriber Client - line 33: # TODO: Create a Subscription object named worker-subscription - line 46: # TODO: Subscriber to the worker-subscription,
quests/serverlessml/01_explore/labs/explore_data.ipynb (4 lines): - line 85: " # TODO 1: Specify the correct BigQuery public dataset for nyc-tlc yellow taxi cab trips\n", - line 156: "# TODO 2: Visualize your dataset using the Seaborn library. Plot the distance of the trip as X and the fare amount as Y\n", - line 192: " # TODO 3: Filter the data to only include non-zero distance trips and fares above $2.50\n", - line 525: "# TODO 4: Create a benchmark to judge future ML model performance off of\n",
\n", - line 191: " # TODO create logic for no_ultrasound where we only know whether its a single baby or multiple (but not how many multiple)\n", - line 193: " if # TODO create logic check for multiples\n", - line 195: " else: # TODO create logic check for single\n", courses/developingapps/v1.3/python/cloudstorage/start/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 36: # TODO: Return the public URL - line 50: # TODO: If there is an image file, then upload it courses/developingapps/python/cloudstorage/start/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 36: # TODO: Return the public URL - line 50: # TODO: If there is an image file, then upload it courses/machine_learning/deepdive2/building_production_ml_systems/solutions/4b_streaming_data_inference_vertex.ipynb (4 lines): - line 203: "**TODO:** Open the file ./taxicab_traffic/streaming_count.py and find the TODO there. Specify a sliding window that is 5 minutes long, and gets recalculated every 15 seconds. Hint: Reference the [beam programming guide](https://beam.apache.org/documentation/programming-guide/#windowing) for guidance. To check your answer reference the solution. \n", - line 281: "# TODO 2a. Write a function to take most recent entry in `traffic_realtime` table and add it to instance.\n", - line 339: "# TODO 2b. Write code to call prediction on instance using realtime traffic info.\n", - line 342: "ENDPOINT_ID = # TODO: Copy the `ENDPOINT_ID` from the deployment in the previous lab.\n", courses/developingapps/v1.2/python/firebase/start/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it courses/developingapps/v1.2/python/cloudstorage/start/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 36: # TODO: Return the public URL - line 50: # TODO: If there is an image file, then upload it courses/data_analysis/deepdive/bigtable-exercises/src/main/java/com/google/cloud/bigtable/training/solutions/Ex1Solution.java (4 lines): - line 110: // TODO: Try running `cbt count ` to make sure the actual row count matches - line 148: // TODO: For each key/value pair in the map, add a column to the Put. - line 155: // TODO 2: For each data point, write a single row into Bigtable. - line 163: // TODO 4: Add the mutation to the BufferedMutator courses/developingapps/v1.3/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/languageapi/LanguageService.java (4 lines): - line 31: // TODO: Create the LanguageServiceClient object - line 34: // TODO: Create a new Document object using the builder - line 42: // TODO: Use the client to analyze the sentiment of the feedback - line 48: // TODO: Return the sentiment score instead of 0.0f; courses/machine_learning/deepdive2/recommendation_systems/labs/content_based_by_hand.ipynb (4 lines): - line 222: "**TODO 1**: Calculuate this as the matrix multiplication of the `users_movies` tensor with the `movies_feats` tensor." - line 254: "users_feats = # TODO: Use matrix multplication to find the user features.\n", - line 392: "**TODO 2**: Implement this as a matrix multiplication. 
courses/machine_learning/deepdive/08_image/labs/mnistmodel/trainer/model.py (4 lines): - line 35: # TODO: Implement DNN model with three hiddenlayers - line 39: # TODO: Implement DNN model and apply dropout to the last hidden layer - line 54: # TODO: apply a second convolution to the output of p1 - line 56: # TODO: apply a pooling layer with pool_size = 2 and strides = 2
courses/developingapps/python/cloudstorage/end/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/machine_learning/deepdive/09_sequence_keras/labs/txtclsmodel/trainer/model.py (4 lines): - line 89: x = # TODO (hint: use tokenizer) - line 94: x = # TODO (hint: there is a useful function in tf.keras.preprocessing...) - line 182: estimator = # TODO: convert keras model to tf.estimator.Estimator - line 262: estimator = # TODO: create estimator
courses/machine_learning/deepdive2/recommendation_systems/labs/basic_ranking.ipynb (4 lines): - line 304: "# TODO 1a -- your code goes here" - line 383: " # TODO 2a -- your code goes here\n", - line 387: " # TODO 2b -- your code goes here\n", - line 820: "# TODO 3a -- your code goes here"
courses/machine_learning/deepdive/08_image_keras/labs/mnistmodel/trainer/model.py (4 lines): - line 38: # TODO: Implement DNN model with three hidden layers - line 44: # TODO: Implement DNN model and apply dropout to the last hidden layer - line 58: model.add(# TODO: Apply a second convolution with nfil2 filters) # shape = (?, 14, 14, nfil2) - line 59: model.add(# TODO: Apply a pooling layer with pool_size = 2 and strides = 2) # shape = (?, 7, 7, nfil2)
courses/developingapps/v1.3/nodejs/stackdriver-debug-errorreporting/start/quiz-app/app.js (4 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 41: // TODO: Create the errorReporting client object - line 69: // TODO: Use Stackdriver Error Reporting
courses/machine_learning/deepdive2/recommendation_systems/labs/als_bqml.ipynb (4 lines): - line 272: "**TODO 1**: Make a prediction for user 903 that does not include already seen movies." - line 292: " WHERE # TODO: Complete this WHERE to remove seen movies.\n", - line 308: "**TODO 2**: Find the top five users who will likely enjoy *American Mullet (2001)*" - line 325: " # TODO: Select all users\n",
courses/developingapps/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/services/gcp/languageapi/LanguageService.java (4 lines): - line 31: // TODO: Create the LanguageServiceClient object - line 34: // TODO: Create a new Document object using the builder - line 42: // TODO: Use the client to analyze the sentiment of the feedback - line 48: // TODO: Return the sentiment score instead of 0.0f;
courses/developingapps/python/firebase/start/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/developingapps/v1.2/java/datastore/end/src/main/java/com/google/training/appdev/services/gcp/datastore/QuestionService.java (4 lines): - line 53: // TODO: Create the query - line 63: // TODO: Execute the query - line 70: // TODO: Return the transformed results - line 81: /* TODO: Uncomment this block
quests/dataflow_scala/3_Batch_Analytics/labs/src/main/scala/scio/mypackage/pipeline/BatchMinuteTrafficPiplene.scala (4 lines): - line 18: //TODO: Add imports - line 27: //TODO: Add CommonLog Class - line 91: //Step3: TODO: Calculate record count for each window with fixed duration of 1 Minute - line 93: /* Step4: TODO: Transform windowed record count into PageView Beam Row
courses/machine_learning/deepdive2/launching_into_ml/labs/explore_data.ipynb (4 lines): - line 68: "# TODO 1: Set correct BigQuery public dataset for nyc-tlc yellow taxi cab trips\n", - line 140: "# TODO 2: Visualize your dataset using the Seaborn library.\n", - line 178: " # TODO 3: Filter the data to only include non-zero distance trips and fares above $2.50\n", - line 540: "# TODO 4: Create a benchmark to judge future ML model performance off of\n",
courses/developingapps/v1.2/nodejs/stackdriver-debug-errorreporting/start/quiz-app/app.js (4 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 41: // TODO: Create the errorReporting client object - line 69: // TODO: Use Stackdriver Error Reporting
courses/developingapps/v1.3/python/cloudstorage/end/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/developingapps/v1.2/python/cloudstorage/end/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/machine_learning/deepdive2/feature_engineering/labs/1_bqml_basic_feat_eng_bqml-lab.ipynb (4 lines): - line 384: "#TODO 1 - your code here\n" - line 429: " #TODO 2 - Your code here\n", - line 555: " #TODO 3a -- Your code here \n", - line 655: " #TODO 3b -- Your code here \n",
quests/dataflow_python/4_SQL_Batch_Analytics/lab/batch_user_traffic_SQL_pipeline.py (4 lines): - line 10: # TODO: Import SqlTransform - line 133: #TODO: Write SQL query - line 143: | 'WriteRawToBQ' >> # TODO: Write Transform to write raw data to BigQuery - line 145: (logs | 'PerUserAggregations' >> # TODO: Apply SqlTransform using ZetaSQL Dialect
courses/ai-for-finance/practice/arima_model.ipynb (4 lines): - line 90: "df_week = # TODO: Use the df DataFrame to resample the 'close' column to a weekly granularity. Use the mean as the aggregator. \n", - line 276: "ar1 = # TODO: Fit an ARIMA model to the differenced data\n", - line 293: "# TODO: Plot the ARMA fitted values on the same plot as the differenced time series" - line 309: "forecast = # TODO: Use the ARMA model to create a forecast two weeks into the future\n",
courses/developingapps/v1.3/java/datastore/end/src/main/java/com/google/training/appdev/services/gcp/datastore/QuestionService.java (4 lines): - line 53: // TODO: Create the query - line 63: // TODO: Execute the query - line 70: // TODO: Return the transformed results - line 81: /* TODO: Uncomment this block
courses/developingapps/java/datastore/end/src/main/java/com/google/training/appdev/services/gcp/datastore/QuestionService.java (4 lines): - line 53: // TODO: Create the query - line 63: // TODO: Execute the query - line 70: // TODO: Return the transformed results - line 81: /* TODO: Uncomment this block
courses/developingapps/v1.3/python/kubernetesengine/bonus/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 25: # TODO: Create a Pub/Sub Subscriber Client - line 31: # TODO: Create a Subscription object named worker-subscription - line 44: # TODO: Subscriber to the worker-subscription,
courses/developingapps/python/kubernetesengine/end/backend/start/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 25: # TODO: Create a Pub/Sub Subscriber Client - line 31: # TODO: Create a Subscription object named worker-subscription - line 44: # TODO: Subscriber to the worker-subscription,
courses/developingapps/nodejs/stackdriver-debug-errorreporting/start/quiz-app/app.js (4 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 41: // TODO: Create the errorReporting client object - line 69: // TODO: Use Stackdriver Error Reporting
courses/developingapps/python/kubernetesengine/bonus/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 25: # TODO: Create a Pub/Sub Subscriber Client - line 31: # TODO: Create a Subscription object named worker-subscription - line 44: # TODO: Subscriber to the worker-subscription,
courses/developingapps/v1.3/python/firebase/start/quiz/webapp/questions.py (4 lines): - line 14: # TODO: Import the storage module - line 29: # TODO: Use the storage client to Upload the file - line 39: # TODO: Return the public URL - line 53: # TODO: If there is an image file, then upload it
courses/machine_learning/deepdive/03_tensorflow/labs/a_tfstart.ipynb (4 lines): - line 230: " #TODO: Write TensorFlow code to compute area of a triangle\n", - line 269: " #TODO: Write TensorFlow code to compute area of a \n", - line 307: " #TODO: Rather than feeding the side values as a constant, \n", - line 343: "#TODO: Using your non-placeholder solution, \n",
courses/developingapps/v1.2/python/kubernetesengine/start/backend/quiz/gcp/pubsub.py (4 lines): - line 19: # TODO: Load the Cloud Pub/Sub module - line 27: # TODO: Create a Pub/Sub Subscriber Client - line 33: # TODO: Create a Subscription object named worker-subscription - line 46: # TODO: Subscriber to the worker-subscription,
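The pubsub.py worker TODOs above (load the module, create a subscriber client, name the worker-subscription, subscribe) sketch out as follows with google-cloud-pubsub; the project id and callback are placeholders:

```python
# Sketch of the pubsub.py worker TODOs, assuming google-cloud-pubsub >= 1.x.
from google.cloud import pubsub_v1  # TODO: Load the Cloud Pub/Sub module

project_id = "your-project-id"  # placeholder

# TODO: Create a Pub/Sub Subscriber Client
subscriber = pubsub_v1.SubscriberClient()

# TODO: Create a Subscription object named worker-subscription
subscription_path = subscriber.subscription_path(project_id, "worker-subscription")


def pull_feedback(callback):
    # TODO: Subscribe to the worker-subscription, invoking callback per message
    return subscriber.subscribe(subscription_path, callback=callback)
```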
courses/machine_learning/deepdive/09_sequence/labs/txtclsmodel/trainer/model.py (4 lines): - line 89: x = # TODO (hint: use tokenizer) - line 94: x = # TODO (hint: there is a useful function in tf.keras.preprocessing...) - line 182: estimator = # TODO: convert keras model to tf.estimator.Estimator - line 262: estimator = # TODO: create estimator
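The txtclsmodel tokenizer TODOs point at Tokenizer.texts_to_sequences and tf.keras.preprocessing.sequence.pad_sequences. A minimal sketch; MAX_SEQUENCE_LENGTH and the toy corpus are assumptions:

```python
# Sketch of the txtclsmodel text-preprocessing TODOs.
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

MAX_SEQUENCE_LENGTH = 50  # assumed

tokenizer = Tokenizer()
tokenizer.fit_on_texts(["some training titles", "go here"])  # toy corpus

# x = # TODO (hint: use tokenizer)
x = tokenizer.texts_to_sequences(["a new title to classify"])

# x = # TODO (hint: there is a useful function in tf.keras.preprocessing...)
x = pad_sequences(x, maxlen=MAX_SEQUENCE_LENGTH)
```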
courses/machine_learning/deepdive2/recommendation_systems/labs/content_based_using_neural_networks.ipynb (3 lines): - line 326: " dataset = # TODO 1: Your code here\n", - line 384: " accuracy = # TODO 2: Your code here\n", - line 1976: " input_fn = # TODO 3: Your code here\n",
quests/endtoendml/solutions/labs/4_preproc.ipynb (3 lines): - line 151: " # TODO #1:\n", - line 240: " ## TODO Task #2: Modify the Apache Beam pipeline such that the first part of the pipe reads the data from BigQuery\n", - line 251: "# TODO Task #3: Once you have verified that the files produced locally are correct, change in_test_mode to False\n",
courses/machine_learning/deepdive2/recommendation_systems/solutions/content_based_using_neural_networks.ipynb (3 lines): - line 325: " # TODO 1: Create dataset from file list\n", - line 383: " # TODO 2: Compute evaluation metrics.\n", - line 1976: "# TODO 3: Provide input data for training\n",
courses/machine_learning/deepdive2/production_ml/labs/distributed_training_with_TF.ipynb (3 lines): - line 232: "# TODO 1 - Your code goes here.\n" - line 663: "# TODO 2 - Your code goes here.\n", - line 1009: "# TODO 3 - Your code goes here.\n"
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1_modules/rfc2985.py (3 lines): - line 86: # TODO: - line 269: # TODO: Once PKCS15Token can be imported, this can be included - line 543: # TODO: Once PKCS15Token can be imported, this can be included
courses/machine_learning/deepdive2/production_ml/labs/post_training_quant.ipynb (3 lines): - line 142: "# TODO 1 - Your code goes here.\n" - line 312: "# TODO 2 - Your code goes here.\n" - line 400: " # TODO 3 - Your code goes here.\n",
courses/machine_learning/deepdive2/production_ml/solutions/distributed_training_with_TF.ipynb (3 lines): - line 232: "# TODO 1 - Here is your code.\n", - line 665: "# TODO 2 - Here is your code.\n", - line 1013: "# TODO 3 - Here is your code.\n",
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1_modules/rfc2985.py (3 lines): - line 86: # TODO: - line 269: # TODO: Once PKCS15Token can be imported, this can be included - line 543: # TODO: Once PKCS15Token can be imported, this can be included
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/quickstart.ipynb (3 lines): - line 301: "# TODO 1\n", - line 411: "# TODO 2\n", - line 464: "# TODO 3\n",
courses/machine_learning/deepdive2/production_ml/solutions/post_training_quant.ipynb (3 lines): - line 142: "# TODO 1 - Here is your code.\n", - line 318: "# TODO 2 - Here is your code.\n", - line 408: " # TODO 3 - Here is your code.\n",
courses/machine_learning/deepdive2/recommendation_systems/solutions/multitask.ipynb (3 lines): - line 411: "# TODO 1: Here is your code.\n", - line 889: "# TODO 2: Here is your code.\n", - line 1349: "# TODO 3: Here is your code.\n",
courses/ai-for-time-series/notebooks/01-explore.ipynb (3 lines): - line 223: "### TODO 1: Analyze the patterns\n", - line 282: "### TODO 2: Review summary statistics\n", - line 301: "### TODO 3: Explore seasonality\n",
quests/dataflow_scala/1_Basic_ETL/labs/src/main/scala/scio/mypackage/pipeline/MyPipeline.scala (3 lines): - line 18: //TODO: Add imports - line 28: //TODO: Add CommonLog Class - line 30: //TODO: Add JsonToCommonLog DoFn
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/build_model_experimentation_lineage_with_prebuild_code.ipynb (3 lines): - line 636: "# TODO: Your code goes here (\n", - line 750: "raw_dataset_artifact = # TODO: Your code goew here (\n", - line 831: "with # TODO: Your code goes here (\n",
courses/machine_learning/deepdive2/building_production_ml_systems/solutions/4b_streaming_data_inference.ipynb (3 lines): - line 213: "**TODO:** Open the file ./taxicab_traffic/streaming_count.py and find the TODO there. Specify a sliding window that is 5 minutes long, and gets recalculated every 15 seconds. Hint: Reference the [beam programming guide](https://beam.apache.org/documentation/programming-guide/#windowing) for guidance. To check your answer reference the solution. \n", - line 300: "# TODO 2a. Write a function to take most recent entry in `traffic_realtime` table and add it to instance.\n", - line 351: "# TODO 2b. Write code to call prediction on instance using realtime traffic info.\n",
courses/developingapps/v1.3/nodejs/stackdriver-debug-errorreporting/end/quiz-app/app.js (3 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 39: // TODO: Create the errorReporting client object
courses/machine_learning/deepdive/04_advanced_preprocessing/labs/a_dataflow.ipynb (3 lines): - line 302: " p | \"read_{}\".format(phase) >> # TODO: Your code goes here\n", - line 303: " | \"tocsv_{}\".format(phase) >> # TODO: Your code goes here\n", - line 304: " | \"write_{}\".format(phase) >> # TODO: Your code goes here\n",
courses/machine_learning/deepdive/05_artandscience/labs/d_customestimator_linear.ipynb (3 lines): - line 292: " predictions_dict = #TODO: create predictions dictionary\n", - line 295: " export_outputs = #TODO: create export_outputs dictionary\n", - line 345: " estimator = # TODO: Add estimator, make sure to add params={'feature_columns': list(feature_columns.values())} as an argument\n",
courses/machine_learning/deepdive2/production_ml/solutions/simple.ipynb (3 lines): - line 321: " # TODO 1: Save the transform_fn to the output_dir\n", - line 671: "# TODO 2: Load a SavedModel from export_dir\n", - line 998: "# TODO 3: Export the model\n",
courses/machine_learning/deepdive2/image_classification/solutions/4_tpu_training.ipynb (3 lines): - line 48: "**TODO #1: Set up a TPU strategy**" - line 107: " # TODO: define a TPU strategy\n", - line 222: "**TODO #2 and #3: Specify the `tpu_address` and `hub_path`**"
courses/machine_learning/deepdive2/tensorflow_extended/solutions/penguin_simple.ipynb (3 lines): - line 680: "# TODO 1\n", - line 918: "# TODO 2\n", - line 2435: "# TODO 3\n",
courses/machine_learning/deepdive2/art_and_science_of_ml/solutions/training_models_at_scale.ipynb (3 lines): - line 659: " #TODO 1a\n", - line 667: " #TODO 1b\n", - line 974: "#TODO 2\n",
courses/machine_learning/deepdive2/tensorflow_extended/labs/penguin_transform.ipynb (3 lines): - line 656: "DATA_ROOT = # TODO 1: Your code here\n", - line 1149: " # TODO 2: Your code goes here\n", - line 5187: "loaded_model = # TODO 3: Your code here\n",
quests/dataflow_python/3_Batch_Analytics/lab/batch_user_traffic_pipeline.py (3 lines): - line 27: # TODO: Finish defining class for schema - line 30: # TODO: Register coder for PerUserAggregation - line 101: | 'PerUserAggregations' >> # TODO: Perform aggregations
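Several entries above (4b_streaming_data_inference and its vertex variant) cite the same windowing TODO in streaming_count.py: a sliding window 5 minutes long, recalculated every 15 seconds. An illustrative Beam snippet; the Create source and fixed timestamps stand in for the lab's Pub/Sub reader:

```python
# Illustrative Apache Beam sliding window: 5 minutes long, recalculated
# every 15 seconds. Create + fixed timestamps stand in for ReadFromPubSub.
import apache_beam as beam
from apache_beam.transforms import window
from apache_beam.transforms.combiners import CountCombineFn

with beam.Pipeline() as p:
    counts = (
        p
        | "Read" >> beam.Create(["ride"] * 10)  # placeholder source
        | "Stamp" >> beam.Map(lambda e: window.TimestampedValue(e, 0))
        | "Window" >> beam.WindowInto(
            window.SlidingWindows(size=5 * 60, period=15))
        | "Count" >> beam.CombineGlobally(CountCombineFn()).without_defaults()
    )
```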
quests/endtoendml/labs/6_deploy.ipynb (3 lines): - line 16: "__TODO__: Complete the lab notebook #TODO sections. You can refer to the [solutions/](../solutions/6_deploy.ipynb) notebook for reference. \n" - line 105: "# TODO: Create the model \n", - line 108: "# TODO: Create the model version \n",
courses/machine_learning/deepdive2/production_ml/labs/build_model_experimentation_lineage_with_prebuild_code.ipynb (3 lines): - line 636: "# TODO: Your code goes here (\n", - line 750: "raw_dataset_artifact = # TODO: Your code goew here (\n", - line 831: "with # TODO: Your code goes here (\n",
courses/machine_learning/deepdive2/production_ml/labs/simple.ipynb (3 lines): - line 322: " _ = # TODO 1: Your code goes here \n", - line 670: "loaded = # TODO 2: Your code goes here\n", - line 997: "export_model = # TODO 3: Your code goes here"
courses/developingapps/v1.2/nodejs/stackdriver-debug-errorreporting/end/quiz-app/app.js (3 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 39: // TODO: Create the errorReporting client object
courses/data_analysis/deepdive/bigtable-exercises/src/main/java/com/google/cloud/bigtable/training/Ex1.java (3 lines): - line 151: // TODO: For each key/value pair in the map, add a column to the Put. - line 158: // TODO: For each data point, write a single row into Bigtable. - line 163: // TODO: Add the mutation to the BufferedMutator
courses/ai-for-finance/practice/momentum_using_hurst.ipynb (3 lines): - line 197: " ### TODO : FILL THIS FUNCTION TO RETURN A BUY (1), SELL (0) or LEAVE POSITION (0.5) prediction \n", - line 205: " # TODO: Fill in the logic for the Hurst Exponent\n", - line 219: " # TODO: We're trading on the 30 day momentum here and losing money, try trading on the basis of Hurst\n",
courses/machine_learning/deepdive2/recommendation_systems/labs/multitask.ipynb (3 lines): - line 411: "# TODO 1: Your code goes here.\n" - line 887: "# TODO 2: Your code goes here.\n" - line 1345: "# TODO 3: Your code goes here.\n"
quests/dataflow/7_Advanced_Streaming_Analytics/solution/src/main/java/com/mypackage/pipeline/StreamingMinuteTrafficPipeline.java (3 lines): - line 141: //TODO: change window_end in other labs - line 224: // TODO: is this a streaming insert? - line 240: //TODO: change this to actual full parameter
courses/machine_learning/asl/01_bigquery/labs/b_bqml.ipynb (3 lines): - line 132: "# TODO: Your code goes here" - line 162: "# TODO: Your code goes here" - line 195: "# TODO: Your code goes here"
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1_modules/rfc2985.py (3 lines): - line 86: # TODO: - line 269: # TODO: Once PKCS15Token can be imported, this can be included - line 543: # TODO: Once PKCS15Token can be imported, this can be included
courses/machine_learning/deepdive2/tensorflow_extended/labs/penguin_simple.ipynb (3 lines): - line 681: "# TODO 1: Your code goes here\n", - line 933: " pusher = # TODO 2: Your code goes here\n", - line 2431: "# TODO 3: Your code goes here\n"
courses/developingapps/v1.3/nodejs/stackdriver-debug-errorreporting/bonus/quiz-app/app.js (3 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 39: // TODO: Create the errorReporting client object
courses/machine_learning/deepdive2/end_to_end_ml/solutions/sample_babyweight.ipynb (3 lines): - line 123: "# TODO 1\n", - line 1785: "# TODO 2\n", - line 2090: "# TODO 3\n",
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1_modules/rfc2985.py (3 lines): - line 86: # TODO: - line 269: # TODO: Once PKCS15Token can be imported, this can be included - line 543: # TODO: Once PKCS15Token can be imported, this can be included
courses/developingapps/v1.2/nodejs/stackdriver-debug-errorreporting/bonus/quiz-app/app.js (3 lines): - line 15: // TODO: Add the following statement to import and start - line 26: // TODO: Load the error-reporting module - line 39: // TODO: Create the errorReporting client object
quests/dataflow_python/4_SQL_Batch_Analytics/lab/batch_minute_traffic_SQL_pipeline.py (3 lines): - line 36: # TODO: Add formatted timestamp as a string. - line 89: # TODO: Write SQL Query - line 98: | "CountPerMinute" >> # TODO: Use SqlTransform with ZetaSQL dialect
courses/machine_learning/deepdive2/tensorflow_extended/solutions/penguin_transform.ipynb (3 lines): - line 655: "DATA_ROOT = tempfile.mkdtemp(prefix='tfx-data') # TODO 1: Create a temporary directory.\n", - line 1147: " # TODO 2: Computes statistics over data for visualization and example validation.\n", - line 5186: "# TODO 3: Load a model saved via model.save()\n",
courses/machine_learning/deepdive/06_structured/labs/4_preproc.ipynb (3 lines): - line 151: " # TODO #1:\n", - line 240: " ## TODO Task #2: Modify the Apache Beam pipeline such that the first part of the pipe reads the data from BigQuery\n", - line 251: "# TODO Task #3: Once you have verified that the files produced locally are correct, change in_test_mode to False\n",
courses/machine_learning/asl/04_advanced_preprocessing/labs/a_dataflow.ipynb (3 lines): - line 302: " p | \"read_{}\".format(phase) >> # TODO: Your code goes here\n", - line 303: " | \"tocsv_{}\".format(phase) >> # TODO: Your code goes here\n", - line 304: " | \"write_{}\".format(phase) >> # TODO: Your code goes here\n",
courses/machine_learning/deepdive/04_features/labs/a_features.ipynb (3 lines): - line 235: " # TODO: Add more features to the dataframe\n", - line 276: " # TODO: Define additional feature columns\n", - line 293: " # TODO: Create tf.estimator.LinearRegressor, train_spec, eval_spec, and train_and_evaluate using your feature columns"
courses/machine_learning/deepdive/01_bigquery/labs/b_bqml.ipynb (3 lines): - line 132: "# TODO: Your code goes here" - line 162: "# TODO: Your code goes here" - line 195: "# TODO: Your code goes here"
courses/developingapps/v1.2/python/kubernetesengine/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/developingapps/python/kubernetesengine/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/text_classification/solutions/text_similarity.ipynb (2 lines): - line 58: "# TODO 1: Install TF.Text TensorFlow library\n", - line 226: "# TODO 2: Compute ROUGE-L with alpha=1\n",
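The text_similarity TODOs ask for ROUGE-L with alpha=1; assuming TF.Text's metrics module (pip install tensorflow-text), that looks roughly like:

```python
# Sketch of the text_similarity TODOs. Assumes tensorflow_text exposes
# metrics.rouge_l over ragged tensors of tokens; treat as an assumption.
import tensorflow as tf
import tensorflow_text as text  # TODO 1: Install TF.Text TensorFlow library

hypotheses = tf.ragged.constant([["captain", "of", "the", "delta", "flight"]])
references = tf.ragged.constant([["delta", "flight", "captain"]])

# TODO 2: Compute ROUGE-L with alpha=1
result = text.metrics.rouge_l(hypotheses, references, alpha=1)
print(result.f_measure, result.p_measure, result.r_measure)
```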
courses/machine_learning/asl/open_project/document_processing/notebooks/document_processing.ipynb (2 lines): - line 197: "TODO: We should ideally restrict the permissions to AutoML and Vision roles only" - line 1131: "TODO: We should include some section on how to evaluate the performance of the extractor. Here we can use the ground_truth table and explore different kinds of string metrics (e.g. Levenshtein distance) to measure accuracy of the entity extraction."
courses/machine_learning/deepdive/06_structured/labs/serving/pipeline/src/main/java/com/google/cloud/training/mlongcp/BabyweightMLService.java (2 lines): - line 28: private static final String PROJECT = "cloud-training-demos"; // TODO: put in your project name here - line 30: private static String VERSION = "ml_on_gcp"; // TODO: put in your version name here
courses/machine_learning/deepdive2/text_classification/labs/rnn_encoder_decoder.ipynb (2 lines): - line 285: " sentence_pairs = # TODO 1a\n", - line 340: " # TODO 1b\n",
courses/machine_learning/deepdive2/recommendation_systems/solutions/content_based_by_hand.ipynb (2 lines): - line 222: "**TODO 1**: Calculuate this as the matrix multiplication of the `users_movies` tensor with the `movies_feats` tensor." - line 392: "**TODO 2**: Implement this as a matrix multiplication. *Hint*: one of the operands will need to be transposed."
courses/machine_learning/deepdive2/structured/labs/serving/application/templates/form.html (2 lines): - line 49: # TODO: Add option for Twins and Triplets too - line 57: # TODO: Add radio-button for Female
courses/machine_learning/deepdive2/recommendation_systems/solutions/als_bqml_hybrid_old.ipynb (2 lines): - line 618: "**TODO 1**: Combine the above two queries to get the user factors and product factor for each rating." - line 980: "**TODO 2**: Create a function that returns named columns from a size 16 product factor array."
quests/dataflow/4_SQL_Batch_Analytics/labs/src/main/java/com/mypackage/pipeline/BatchMinuteTrafficSQLPipeline.java (2 lines): - line 126: // TODO: Add JODA Timestamp field to Row, Write Windowing SQL transform - line 129: // TODO: Uncomment BigQueryIO Sink
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/httplib2/__init__.py (2 lines): - line 435: # TODO: add current time as _entry_disposition argument to avoid sleep in tests - line 755: # TODO: self.challenge['domain']
courses/machine_learning/deepdive2/building_production_ml_systems/solutions/2_hyperparameter_tuning_vertex.ipynb (2 lines): - line 392: "# TODO 1\n", - line 397: "# TODO 1\n",
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/httplib2/__init__.py (2 lines): - line 435: # TODO: add current time as _entry_disposition argument to avoid sleep in tests - line 755: # TODO: self.challenge['domain']
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/codec/der/encoder.py (2 lines): - line 32: # TODO: move out of sorting key function - line 39: # TODO: support nested CHOICE ordering
quests/endtoendml/labs/3_keras_wd.ipynb (2 lines): - line 24: "__TODO__: Complete the lab notebook #TODO sections. You can refer to the [solutions/](../solutions/3_keras_wd.ipynb) notebook for reference. " - line 239: " # TODO bucketize the float fields. This makes them wide\n",
courses/developingapps/v1.3/python/kubernetesengine/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/httplib2/__init__.py (2 lines): - line 435: # TODO: add current time as _entry_disposition argument to avoid sleep in tests - line 755: # TODO: self.challenge['domain']
quests/dataflow/5_Streaming_Analytics/labs/src/main/java/com/mypackage/pipeline/StreamingMinuteTrafficPipeline.java (2 lines): - line 140: //TODO: Read from PubSub - line 144: //TODO: Write aggregation logic and BQ Write
courses/machine_learning/deepdive/03_tensorflow/labs/c_dataset.ipynb (2 lines): - line 81: "# TODO: Create an appropriate input function read_dataset\n", - line 83: " #TODO Add CSV decoder function and dataset creation and methods\n",
courses/machine_learning/deepdive2/recommendation_systems/solutions/als_bqml.ipynb (2 lines): - line 272: "**TODO 1**: Make a prediction for user 903 that does not include already seen movies." - line 308: "**TODO 2**: Find the top five users who will likely enjoy *American Mullet (2001)*"
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/type/univ.py (2 lines): - line 1711: # TODO: remove when Py2.5 support is gone - line 1939: # TODO: we should wrap componentType with UnnamedType to carry
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/end/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module,
courses/machine_learning/asl/05_review/labs/serving/pipeline/src/main/java/com/google/cloud/training/mlongcp/BabyweightMLService.java (2 lines): - line 28: private static final String PROJECT = "cloud-training-demos"; // TODO: put in your project name here - line 30: private static String VERSION = "ml_on_gcp"; // TODO: put in your version name here
courses/developingapps/python/kubernetesengine/end/backend/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/start/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module,
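The frontend api.py TODOs that recur through this stretch (add pubsub to the import list, publish the feedback, return the result) sketch out as a plain Pub/Sub publish; the topic and project names are placeholders, and the labs route this through a local pubsub module rather than calling the client directly:

```python
# Sketch of the api.py "publish the feedback" TODOs. Project and topic names
# are placeholders; the labs wrap this in their own pubsub module.
import json
from google.cloud import pubsub_v1  # TODO: Add pubsub to import list

project_id = "your-project-id"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, "feedback")


def publish_feedback(feedback):
    # TODO: Publish the feedback using your pubsub module, return the result
    payload = json.dumps(feedback).encode("utf-8")
    future = publisher.publish(topic_path, payload)
    return future.result()  # the published message id
```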
1939: # TODO: we should wrap componentType with UnnamedType to carry courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/taxifare/trainer/model.py (2 lines): - line 174: # TODO 1a: Your code here - line 180: # TODO 1b: Your code here bootcamps/imagereco/fashion_cnn_batch_dropout_lab.ipynb (2 lines): - line 47: "**TODO: Add batch normalization to the last dense layer of your CNN**\n", - line 69: "**TODO: Add dropout to the batch normed layer of your CNN**\n", courses/developingapps/python/appengine/end/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result courses/developingapps/v1.3/python/appengine/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result courses/developingapps/python/pubsub-languageapi-spanner/start/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result courses/machine_learning/deepdive/03_tensorflow/labs/taxifare/trainer/model.py (2 lines): - line 37: # TODO: Add input function - line 68: # TODO: Create tf.estimator.DNNRegressor train and evaluate function passing args['parsed_argument'] from task.py courses/machine_learning/deepdive/05_review/labs/serving/pipeline/src/main/java/com/google/cloud/training/mlongcp/BabyweightMLService.java (2 lines): - line 28: private static final String PROJECT = "cloud-training-demos"; // TODO: put in your project name here - line 30: private static String VERSION = "ml_on_gcp"; // TODO: put in your version name here courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/codec/ber/encoder.py (2 lines): - line 190: # TODO: try to avoid ASN.1 schema instantiation - line 525: # TODO: handling three flavors of input is too much -- split over codecs courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 83: # TODO: Publish the feedback using your pubsub module, return the result quests/endtoendml/solutions/labs/serving/pipeline/src/main/java/com/google/cloud/training/mlongcp/BabyweightMLService.java (2 lines): - line 28: private static final String PROJECT = "cloud-training-demos"; // TODO: put in your project name here - line 30: private static String VERSION = "ml_on_gcp"; // TODO: put in your version name here courses/developingapps/python/appengine/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/httplib2/__init__.py (2 lines): - line 435: # TODO: add current time as _entry_disposition argument to avoid sleep in tests - line 755: # TODO: self.challenge['domain'] courses/developingapps/v1.3/python/kubernetesengine/end/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list - line 74: # TODO: Publish the feedback using your pubsub module, return the result courses/developingapps/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/api/QuizEndpoint.java (2 lines): - line 44: // TODO: Declare the publishService - line 76: // TODO: Publish the feedback to Pub/Sub blogs/lightning/ltgpred/preproc/create_dataset.py (2 lines): - line 63: # FIXME: 
leap years?
- line 362: 'sdk_location': # FIXME: remove this once 2.12 is released
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/taxifare/trainer/model.py (2 lines): - line 174: # TODO 1a: Your code here
- line 180: # TODO 1b: Your code here
courses/developingapps/python/pubsub-languageapi-spanner/end/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/developingapps/v1.2/python/kubernetesengine/bonus/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 85: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/intro_logistic_regression_TF2.0.ipynb (2 lines): - line 106: "model = # TODO 1 -- Your code here."
- line 232: "loss_fn = # TODO 2 -- Your code here."
courses/developingapps/v1.3/python/kubernetesengine/bonus/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 85: # TODO: Publish the feedback using your pubsub module, return the result
quests/vertex-ai/vertex-challenge-lab/bert-sentiment-classifier/trainer/model.py (2 lines): - line 83: # TODO: Add a hub.KerasLayer for BERT text preprocessing.
- line 86: # TODO: Add a hub.KerasLayer for BERT text encoding.
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/start/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module,
courses/developingapps/v1.3/python/appengine/end/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/end/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module,
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/codec/der/encoder.py (2 lines): - line 32: # TODO: move out of sorting key function
- line 39: # TODO: support nested CHOICE ordering
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 83: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/codec/ber/encoder.py (2 lines): - line 190: # TODO: try to avoid ASN.1 schema instantiation
- line 525: # TODO: handling three flavors of input is too much -- split over codecs
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/codec/der/encoder.py (2 lines): - line 32: # TODO: move out of sorting key function
- line 39: # TODO: support nested CHOICE ordering
courses/machine_learning/deepdive/08_image_keras/labs/mnist_linear.ipynb (2 lines): - line 124: " # TODO: Create linear model that takes the input features and outputs the normalized probabilities\n",
- line 162: " # TODO: Create eval input function. Hint: Be sure to change each of the parameters since training and evaluation are different\n",
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1/codec/der/encoder.py (2 lines): - line 32: # TODO: move out of sorting key function
- line 39: # TODO: support nested CHOICE ordering
courses/developingapps/python/kubernetesengine/end/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/developingapps/v1.2/python/kubernetesengine/end/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/structured/labs/serving/application/main.py (2 lines): - line 64: # TODO: Map baby gender values to is_male values
- line 69: # TODO: Map plurality integers to our strings
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/templates/form.html (2 lines): - line 49: # TODO: Add option for Twins and Triplets too
- line 57: # TODO: Add radio-button for Female
quests/serverlessml/07_caip/solution/train_caip.ipynb (2 lines): - line 20: "## TODO: Export the data from BigQuery to GCS\n",
- line 30: "## TODO: Edit notebook parameters\n",
courses/developingapps/python/kubernetesengine/bonus/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 85: # TODO: Publish the feedback using your pubsub module, return the result
quests/dataflow/4_SQL_Batch_Analytics/labs/src/main/java/com/mypackage/pipeline/BatchUserTrafficSQLPipeline.java (2 lines): - line 115: // TODO: Write SQL Transform
- line 123: // TODO: Write branch for raw logs
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/intro_logistic_regression_TF2.0.ipynb (2 lines): - line 106: "# TODO 1\n",
- line 231: "loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # TODO 2"
courses/developingapps/v1.3/java/pubsub-languageapi-spanner/start/src/main/java/com/google/training/appdev/api/QuizEndpoint.java (2 lines): - line 44: // TODO: Declare the publishService
- line 76: // TODO: Publish the feedback to Pub/Sub
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1/type/constraint.py (2 lines): - line 87: # TODO: fix possible comparison of set vs scalars here
- line 698: # TODO:
quests/serverlessml/07_caip/labs/train_caip.ipynb (2 lines): - line 20: "## TODO: Export the data from BigQuery to GCS\n",
- line 30: "## TODO: Edit notebook parameters\n",
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/type/constraint.py (2 lines): - line 87: # TODO: fix possible comparison of set vs scalars here
- line 698: # TODO:
courses/machine_learning/deepdive2/text_classification/labs/custom_tf_hub_word_embedding.ipynb (2 lines): - line 480: "med_embed = # TODO: Your code goes here."
- line 504: "outputs = # TODO: Your code goes here.\n",
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/main.py (2 lines): - line 64: # TODO: Map baby gender values to is_male values
- line 69: # TODO: Map plurality integers to our strings
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1/codec/ber/encoder.py (2 lines): - line 190: # TODO: try to avoid ASN.1 schema instantiation
- line 525: # TODO: handling three flavors of input is too much -- split over codecs
courses/machine_learning/deepdive2/text_classification/labs/text_similarity.ipynb (2 lines): - line 59: "# TODO 1: Your code here"
- line 227: "# TODO 2: Your code here"
courses/machine_learning/deepdive2/explainable_ai/solutions/xai_structured_caip.ipynb (2 lines): - line 90: "PROJECT_ID = \"\" # TODO: your PROJECT_ID here.\n",
- line 106: "BUCKET_NAME = \"\" # TODO: your BUCKET_NAME here. \n",
courses/machine_learning/deepdive2/structured/labs/serving/application/app.yaml (2 lines): - line 13: MODEL_NAME: # TODO: Add model name string
- line 14: VERSION_NAME: # TODO: Add model version string
courses/developingapps/v1.2/python/appengine/start/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/developingapps/python/pubsub-languageapi-spanner/bonus/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 83: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/type/univ.py (2 lines): - line 1711: # TODO: remove when Py2.5 support is gone
- line 1939: # TODO: we should wrap componentType with UnnamedType to carry
self-paced-labs/tensorflow-2.x/synthetic_features_and_outliers.ipynb (2 lines): - line 381: "__#TODO:__ Plot a scatter graph to show the scatter points. "
- line 468: "__#TODO:__ Let's clip rooms_per_person to 5, and plot a histogram to double-check the results."
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/app.yaml (2 lines): - line 13: MODEL_NAME: # TODO: Add model name string
- line 14: VERSION_NAME: # TODO: Add model version string
courses/developingapps/v1.2/python/appengine/end/frontend/quiz/api/api.py (2 lines): - line 22: # TODO: Add pubsub to import list
- line 74: # TODO: Publish the feedback using your pubsub module, return the result
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/type/constraint.py (2 lines): - line 87: # TODO: fix possible comparison of set vs scalars here
- line 698: # TODO:
self-paced-labs/vertex-ai/vertex-challenge-lab/bert-sentiment-classifier/trainer/model.py (2 lines): - line 83: # TODO: Add a hub.KerasLayer for BERT text preprocessing.
- line 86: # TODO: Add a hub.KerasLayer for BERT text encoding.
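The quiz api.py entries above all share the same pair of TODOs: add pubsub to the import list, then publish the feedback and return the result. A minimal sketch of that pattern, assuming the google-cloud-pubsub client and a topic named `feedback` (the labs wire this through their own helper module, so names here are illustrative):

```python
import json
import os

from google.cloud import pubsub_v1

# Assumed topic name and project source; the labs configure their own.
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(
    os.environ["GOOGLE_CLOUD_PROJECT"], "feedback")

def publish_feedback(feedback):
    """Publish the feedback dict and return the Pub/Sub message ID."""
    payload = json.dumps(feedback).encode("utf-8")
    future = publisher.publish(topic_path, payload)
    return future.result()
```

The `publish()` call returns a future; calling `result()` blocks until the broker acknowledges the message and yields its ID, which is what a REST handler would hand back to the caller.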
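The bert-sentiment-classifier trainer/model.py entries ask for two `hub.KerasLayer` calls. A sketch under assumed TF Hub handles (the lab may pin different preprocessing and encoder modules):

```python
import tensorflow as tf
import tensorflow_hub as hub

# Assumed TF Hub handles -- swap in whichever modules the lab pins.
PREPROCESS = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
ENCODER = "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1"

def build_text_classifier():
    text = tf.keras.layers.Input(shape=(), dtype=tf.string, name="text")
    # hub.KerasLayer for BERT text preprocessing (tokenize + pack inputs).
    encoder_inputs = hub.KerasLayer(PREPROCESS, name="preprocessing")(text)
    # hub.KerasLayer for BERT text encoding.
    outputs = hub.KerasLayer(ENCODER, trainable=True,
                             name="encoder")(encoder_inputs)
    x = tf.keras.layers.Dropout(0.1)(outputs["pooled_output"])
    return tf.keras.Model(text, tf.keras.layers.Dense(1, name="classifier")(x))
```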
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/type/constraint.py (2 lines): - line 87: # TODO: fix possible comparison of set vs scalars here
- line 698: # TODO:
courses/machine_learning/deepdive/03_tensorflow/labs/d_traineval.ipynb (2 lines): - line 165: "## TODO: Create serving input function\n",
- line 191: "## TODO: Create train and evaluate function using tf.estimator\n",
courses/machine_learning/deepdive2/production_ml/labs/serving_ml_prediction.ipynb (2 lines): - line 79: " # TODO: Your code goes here\n",
- line 119: "# TODO: Your code goes here\n"
courses/machine_learning/deepdive/10_recommend/labs/content_based_by_hand.ipynb (2 lines): - line 134: "We will compute the user feature matrix; that is, a matrix containing each user's embedding in the five-dimensional feature space. We can calculuate this as the matrix multiplication of the `users_movies` tensor with the `movies_feats` tensor. Implement this in the TODO below."
- line 151: "users_feats = #TODO \n",
courses/machine_learning/deepdive2/building_production_ml_systems/solutions/2_hyperparameter_tuning.ipynb (2 lines): - line 442: " # TODO 1\n",
- line 445: " # TODO 1\n",
quests/bq-optimize/02_partition_and_clustering/solution/partition_and_cluster.ipynb (2 lines): - line 997: " # TODO: Specify a date field to partition on and a field to cluster on:\n",
- line 2284: "# TODO write where clause filters to pull latest performance from each table (and debug why they keep getting truncated)\n",
courses/machine_learning/deepdive2/recommendation_systems/solutions/als_bqml_hybrid.ipynb (2 lines): - line 672: "**TODO 1**: Combine the above two queries to get the user factors and product factor for each rating."
- line 1034: "**TODO 2**: Create a function that returns named columns from a size 16 product factor array."
courses/machine_learning/deepdive/05_artandscience/labs/b_hyperparam.ipynb (1 line): - line 132: " # TODO: Add learning_rate and batch_size as command line args\n",
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/jinja2/ext.py (1 line): - line 162: # TODO: the i18n extension is currently reevaluating values in a few
quests/dataflow/7_Advanced_Streaming_Analytics/labs/src/main/java/com/mypackage/pipeline/StreamingMinuteTrafficPipeline.java (1 line): - line 155: //TODO: Add withAllowedLateness
courses/developingapps/v1.3/nodejs/stackdriver-trace-monitoring/start/frontend/app.js (1 line): - line 17: // TODO: Load the trace-agent and start it
quests/endtoendml/labs/5_train_keras.ipynb (1 line): - line 18: "__TODO__: Complete the lab notebook #TODO sections. You can refer to the [solutions/](../solutions/5_train_keras.ipynb) notebook for reference. \n",
courses/ai-for-finance/solution/momentum_backtest_losing_money.ipynb (1 line): - line 591: " ### TODO : FILL THIS FUNCTION TO RETURN A BUY (1) or SELL (0) prediction for each stock ###\n",
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/codec/cer/decoder.py (1 line): - line 37: # TODO: prohibit non-canonical encoding
courses/machine_learning/deepdive2/launching_into_ml/solutions/bigquery.ipynb (1 line): - line 472: "TODO: replace \\ with your PROJECT_ID\n",
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/jinja2/ext.py (1 line): - line 162: # TODO: the i18n extension is currently reevaluating values in a few
courses/developingapps/python/kubernetesengine/end/backend/start/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving_babyweight.ipynb (1 line): - line 118: "# TODO 1: Deploy a web application that consumes your model service on Cloud AI Platform\n",
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/werkzeug/_reloader.py (1 line): - line 103: # TODO remove this once Flask no longer misbehaves
courses/developingapps/python/pubsub-languageapi-spanner/end/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/developingapps/python/datastore/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
self-paced-labs/tfx/tfx-ai-platform/labs/lab_exercise.ipynb (1 line): - line 240: "#TODO: Set your environment resource settings here for ENDPOINT.\n",
courses/developingapps/v1.2/python/appengine/end/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/compat/octets.py (1 line): - line 15: # TODO: refactor to return a sequence of ints
courses/machine_learning/deepdive/06_structured/babyweight_tf2/trainer/task.py (1 line): - line 46: ## TODO 1: add the new arguments here
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/compat/octets.py (1 line): - line 15: # TODO: refactor to return a sequence of ints
courses/developingapps/v1.3/python/datastore/bonus/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/developingapps/python/appengine/end/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/jinja2/ext.py (1 line): - line 162: # TODO: the i18n extension is currently reevaluating values in a few
courses/machine_learning/deepdive2/image_classification/solutions/tpu_models/trainer/task.py (1 line): - line 50: # TODO: define a TPU strategy
courses/developingapps/v1.3/nodejs/stackdriver-trace-monitoring/end/frontend/app.js (1 line): - line 17: // TODO: Load the trace-agent and start it
courses/machine_learning/deepdive2/time_series_prediction/labs/optional_2_feature_engineering.ipynb (1 line): - line 373: "--# TODO: verify the stock market is going up -- on average.\n",
courses/machine_learning/deepdive2/building_production_ml_systems/labs/taxicab_traffic/streaming_count.py (1 line): - line 69: | 'window' >> # TODO: Your code goes here.
courses/machine_learning/deepdive/05_review/labs/serving/application/main.py (1 line): - line 83: features['gestation_weeks'] = # TODO: get gestation_weeks and cast to float
blogs/popdensity/convert_to_geo.py (1 line): - line 71: YEAR = 2020 # FIXME: parse filename
courses/machine_learning/deepdive2/structured/solutions/5a_train_keras_ai_platform_babyweight.ipynb (1 line): - line 104: "# TODO: Change these to try this notebook out\n",
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/flask/helpers.py (1 line): - line 286: # TODO: get rid of this deprecated functionality in 1.0
courses/developingapps/v1.2/python/datastore/end/quiz/webapp/questions.py (1 line): - line 14: # TODO: Import the datastore module
courses/developingapps/python/datastore/bonus/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/flask/helpers.py (1 line): - line 286: # TODO: get rid of this deprecated functionality in 1.0
courses/developingapps/v1.2/python/kubernetesengine/start/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
quests/endtoendml/labs/babyweight_tf2/trainer/task.py (1 line): - line 46: ## TODO 1: add the new arguments here
courses/developingapps/v1.3/nodejs/stackdriver-trace-monitoring/start/frontend/api/index.js (1 line): - line 66: // TODO: Sends the answers to Pub/Sub in parallel
courses/developingapps/v1.3/python/firebase/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/developingapps/v1.3/python/kubernetesengine/bonus/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/developingapps/v1.3/nodejs/firebase/end/server/public/client/index.html (1 line): - line 39:
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1/compat/integer.py (1 line): - line 93: # TODO: strip lhs zeros
courses/developingapps/python/cloudstorage/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/developingapps/python/firebase/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/data_analysis/lab2/python/JavaProjectsThatNeedHelp.py (1 line): - line 26: Needing help is determined by counting the number of times the package contains the words FIXME or TODO
courses/machine_learning/asl/05_review/labs/serving/application/templates/form.html (1 line): - line 49:
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1/codec/cer/decoder.py (1 line): - line 37: # TODO: prohibit non-canonical encoding
self-paced-labs/vertex-ai/vertex-pipelines/tfx/lab_exercise.ipynb (1 line): - line 654: "# TODO: create a Docker Artifact Registry using the gcloud CLI.\n",
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/taxicab_traffic/streaming_count.py (1 line): - line 69: | 'window' >> # TODO: Your code goes here.
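The serving application main.py entries leave `features['gestation_weeks'] =` unfinished. A hypothetical completion, assuming the value arrives as a Flask form field named `gestation_weeks` (the route and field names here are placeholders):

```python
from flask import Flask, request

app = Flask(__name__)

@app.route("/predict", methods=["POST"])
def predict():
    features = {}
    # TODO target: get gestation_weeks from the form and cast to float.
    features["gestation_weeks"] = float(request.form["gestation_weeks"])
    return features  # Flask jsonifies a dict return value
```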
quests/bq-teradata/01_teradata_bq_essentials/labs/bigquery_essentials_for_teradata_users.ipynb (1 line): - line 440: "--TODO: \n",
courses/ai-for-finance/practice/freestyle.ipynb (1 line): - line 759: "# TODO: Write code to build a model to predict Direction"
courses/developingapps/v1.3/python/cloudstorage/start/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/codec/der/decoder.py (1 line): - line 20: # TODO: prohibit non-canonical encoding
courses/machine_learning/deepdive/05_artandscience/labs/c_neuralnetwork.ipynb (1 line): - line 297: " estimator = # TODO: Implement DNN Regressor model\n",
courses/machine_learning/deepdive2/how_google_does_ml/bigquery/labs/analyze_with_bigquery.ipynb (1 line): - line 46: "PROJECT = \"\" #TODO Replace with your project id\n",
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving_babyweight.ipynb (1 line): - line 109: "# TODO 1: Your code goes here"
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/flask/helpers.py (1 line): - line 286: # TODO: get rid of this deprecated functionality in 1.0
courses/developingapps/v1.2/nodejs/stackdriver-trace-monitoring/end/frontend/api/index.js (1 line): - line 66: // TODO: Sends the answers to Pub/Sub in parallel
quests/endtoendml/solutions/labs/serving/application/templates/form.html (1 line): - line 49:
courses/developingapps/v1.2/python/firebase/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive/06_structured/babyweight/trainer/task.py (1 line): - line 46: ## TODO 1: add the new arguments here
courses/developingapps/v1.2/nodejs/stackdriver-trace-monitoring/end/frontend/app.js (1 line): - line 17: // TODO: Load the trace-agent and start it
courses/ai-for-finance/solution/yahoo_data_source.py (1 line): - line 156: # TODO: Fix for python 3
courses/developingapps/v1.3/nodejs/stackdriver-trace-monitoring/end/frontend/api/index.js (1 line): - line 66: // TODO: Sends the answers to Pub/Sub in parallel
courses/developingapps/v1.2/nodejs/firebase/end/server/public/client/index.html (1 line): - line 39:
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/click/_termui_impl.py (1 line): - line 374: # TODO: This never terminates if the passed generator never terminates.
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/jinja2/ext.py (1 line): - line 162: # TODO: the i18n extension is currently reevaluating values in a few
courses/developingapps/python/datastore/bonus/quiz/webapp/questions.py (1 line): - line 14: # TODO: Import the datastore module
quests/endtoendml/labs/2_sample.ipynb (1 line): - line 33: "# TODO: change these to reflect your environment\n",
courses/developingapps/v1.3/python/cloudstorage/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
blogs/microservices-demo-1/src/checkoutservice/main.go (1 line): - line 431: // TODO: Dial and create client once, reuse.
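For the c_neuralnetwork.ipynb entry (`estimator = # TODO: Implement DNN Regressor model`), a minimal sketch using the TF 1.x-style Estimator API the lab is built on; the feature columns, layer sizes, and output directory are placeholders, not the notebook's exact values:

```python
import tensorflow as tf

# Placeholder feature columns and output dir, for illustration only.
feature_cols = [tf.feature_column.numeric_column(k)
                for k in ["pickuplon", "pickuplat", "dropofflon", "dropofflat"]]
OUTDIR = "taxi_trained"

# A DNN regressor with three assumed hidden layers.
estimator = tf.estimator.DNNRegressor(
    hidden_units=[32, 8, 2],
    feature_columns=feature_cols,
    model_dir=OUTDIR)
```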
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/codec/der/decoder.py (1 line): - line 20: # TODO: prohibit non-canonical encoding
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1_modules/rfc2459.py (1 line): - line 4: # Updated by Russ Housley to resolve the TODO regarding the Certificate
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1_modules/rfc2459.py (1 line): - line 4: # Updated by Russ Housley to resolve the TODO regarding the Certificate
quests/endtoendml/solutions/labs/6_deploy.ipynb (1 line): - line 181: "# TODO: complete\n",
quests/endtoendml/solutions/babyweight/trainer/task.py (1 line): - line 46: ## TODO 1: add the new arguments here
courses/developingapps/v1.2/python/datastore/bonus/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive2/art_and_science_of_ml/labs/taxicab_traffic/streaming_count.py (1 line): - line 69: | 'window' >> # TODO: Your code goes here.
courses/developingapps/v1.3/python/datastore/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive/09_sequence/labs/reusable-embeddings.ipynb (1 line): - line 659: " hub_module, # TODO \n",
courses/developingapps/python/kubernetesengine/end/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/structured/solutions/5b_deploy_keras_ai_platform_babyweight.ipynb (1 line): - line 144: "!gsutil cp -r ./babyweight gs://bucket-name # TODO: Replace with your bucket-name"
blogs/rl-on-gcp/DQN_Breakout/RL_on_GCP.ipynb (1 line): - line 288: "#### TODO: why doesn't it use GPU?\n",
courses/developingapps/v1.2/nodejs/stackdriver-trace-monitoring/start/frontend/api/index.js (1 line): - line 66: // TODO: Sends the answers to Pub/Sub in parallel
courses/developingapps/v1.2/python/datastore/bonus/quiz/webapp/questions.py (1 line): - line 14: # TODO: Import the datastore module
courses/developingapps/v1.2/python/cloudstorage/end/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/codec/der/decoder.py (1 line): - line 20: # TODO: prohibit non-canonical encoding
courses/machine_learning/deepdive2/structured/solutions/1b_prepare_data_babyweight.ipynb (1 line): - line 166: "# TODO: Change environment variables\n",
courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/developingapps/v1.3/python/firebase/start/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/werkzeug/_reloader.py (1 line): - line 103: # TODO remove this once Flask no longer misbehaves
courses/developingapps/v1.3/python/datastore/end/quiz/webapp/questions.py (1 line): - line 14: # TODO: Import the datastore module
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1_modules/rfc2459.py (1 line): - line 4: # Updated by Russ Housley to resolve the TODO regarding the Certificate
courses/data_analysis/lab2/javahelp/src/main/java/com/google/cloud/training/dataanalyst/javahelp/JavaProjectsThatNeedHelp.java (1 line): - line 45: * the words FIXME or TODO in its source)
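The recurring questions.py entry (`# TODO: Import the datastore module`) reduces to an import plus a client. A sketch assuming the google-cloud-datastore library and a `Question` kind (the kind name is an assumption about the quiz app's schema):

```python
# TODO target: import the datastore module.
from google.cloud import datastore

# The client picks up the project from the environment.
datastore_client = datastore.Client()

def list_questions():
    # Fetch every entity of the (assumed) 'Question' kind.
    query = datastore_client.query(kind="Question")
    return list(query.fetch())
```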
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/compat/integer.py (1 line): - line 93: # TODO: strip lhs zeros
courses/developingapps/python/appengine/start/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/werkzeug/_reloader.py (1 line): - line 103: # TODO remove this once Flask no longer misbehaves
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/click/_termui_impl.py (1 line): - line 374: # TODO: This never terminates if the passed generator never terminates.
courses/machine_learning/deepdive/04_features/dataflow/python/JavaProjectsThatNeedHelp_PY2_Version.py (1 line): - line 26: Needing help is determined by counting the number of times the package contains the words FIXME or TODO
courses/data_analysis/deepdive/bigtable-exercises/src/main/java/com/google/cloud/bigtable/training/Ex2.java (1 line): - line 102: // TODO: Do a scan of rows beginning with the string "action"
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1/codec/cer/decoder.py (1 line): - line 37: # TODO: prohibit non-canonical encoding
courses/ai-for-finance/practice/intro_tf_data_keras_sequential.ipynb (1 line): - line 383: "# TODO 1: Create input layer of feature columns\n"
courses/machine_learning/deepdive/04_features/dataflow/javahelp/src/main/java/com/google/cloud/training/dataanalyst/javahelp/JavaProjectsThatNeedHelp.java (1 line): - line 45: * the words FIXME or TODO in its source)
quests/endtoendml/solutions/babyweight_tf2/trainer/task.py (1 line): - line 46: ## TODO 1: add the new arguments here
courses/developingapps/python/firebase/start/quiz/api/api.py (1 line): - line 22: # TODO: Add pubsub to import list
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/pyasn1/compat/octets.py (1 line): - line 15: # TODO: refactor to return a sequence of ints
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/compat/integer.py (1 line): - line 93: # TODO: strip lhs zeros
quests/endtoendml/solutions/labs/serving/application/main.py (1 line): - line 83: features['gestation_weeks'] = # TODO: get gestation_weeks and cast to float
courses/developingapps/v1.3/python/kubernetesengine/end/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/werkzeug/_reloader.py (1 line): - line 103: # TODO remove this once Flask no longer misbehaves
quests/dataflow/6_SQL_Streaming_Analytics/labs/src/main/java/com/mypackage/pipeline/StreamingMinuteTrafficSQLPipeline.java (1 line): - line 136: // TODO: implement steps in the pipeline
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/click/_termui_impl.py (1 line): - line 374: # TODO: This never terminates if the passed generator never terminates.
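For the intro_tf_data_keras_sequential.ipynb entry ("Create input layer of feature columns"), a sketch with hypothetical column names; the notebook defines its own schema:

```python
import tensorflow as tf

# Hypothetical numeric columns; substitute the notebook's feature names.
feature_columns = [tf.feature_column.numeric_column(name)
                   for name in ["open", "close", "volume"]]

model = tf.keras.Sequential([
    # TODO 1 target: the input layer built from the feature columns.
    tf.keras.layers.DenseFeatures(feature_columns),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(1),
])
```

`DenseFeatures` consumes a dict of named tensors (e.g. from `tf.data` with labeled columns) and concatenates the transformed columns into one dense input for the stacked `Dense` layers.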
courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/pyasn1/codec/cer/decoder.py (1 line): - line 37: # TODO: prohibit non-canonical encoding
courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
courses/machine_learning/deepdive2/end_to_end_ml/solutions/preproc.ipynb (1 line): - line 496: "# TODO 1\n",
quests/endtoendml/labs/babyweight/trainer/task.py (1 line): - line 46: ## TODO 1: add the new arguments here
courses/ai-for-finance/solution/momentum_backtest_making_money.ipynb (1 line): - line 466: " ### TODO : FILL THIS FUNCTION TO RETURN A BUY (1) or SELL (0) prediction for each stock ###\n",
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/click/_termui_impl.py (1 line): - line 374: # TODO: This never terminates if the passed generator never terminates.
courses/developingapps/v1.2/python/kubernetesengine/end/frontend/run_server.py (1 line): - line 17: # TODO: Add the following statement to import and start
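The babyweight trainer/task.py entry (`## TODO 1: add the new arguments here`) recurs across several copies of the lab and is an argparse exercise. A sketch with illustrative flag names and defaults, not the lab's exact set:

```python
import argparse

parser = argparse.ArgumentParser()
# Illustrative hyperparameter flags; the lab defines its own arguments.
parser.add_argument("--batch_size", type=int, default=512)
parser.add_argument("--num_epochs", type=int, default=10)
parser.add_argument("--output_dir", required=True,
                    help="GCS or local path for checkpoints and exports")
args = parser.parse_args()
```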