def pca_model()

in courses/machine_learning/asl/open_project/time_series_anomaly_detection/tf_anomaly_detection_model_selection/anomaly_detection_module/trainer/autoencoder_pca.py


def pca_model(X, mode, params, cur_batch_size, dummy_var):
  """PCA to reconstruct inputs and minimize reconstruction error.

  Given data matrix tensor X, the current Estimator mode, the dictionary of
  parameters, the current batch size, and a dummy variable, process through the
  PCA model subgraph and return reconstructed inputs as output.

  Args:
    X: tf.float64 matrix tensor of input data.
    mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.
    params: Dictionary of parameters.
    cur_batch_size: Current batch size, could be partially filled.
    dummy_var: Dummy variable used to allow training mode to happen since it
      requires a gradient to tie back to the graph dependency.

  Returns:
    loss: Reconstruction loss, or None outside of reconstruction training.
    train_op: Train operation so that Estimator can correctly add to dependency
      graph, or None outside of reconstruction training.
    X_time: 2D tensor representation of mean-centered, time major input data.
    X_time_recon: 2D tensor representation of reconstructed time major input
      data.
    X_feat: 2D tensor representation of mean-centered, feature major input
      data.
    X_feat_recon: 2D tensor representation of reconstructed feature major
      input data.
  """
  # Reshape into 2-D tensors
  # Time based
  # shape = (cur_batch_size * seq_len, num_feat)
  X_time = tf.reshape(
      tensor=X,
      shape=[cur_batch_size * params["seq_len"], params["num_feat"]])

  # Features based
  # shape = (cur_batch_size, num_feat, seq_len)
  X_transposed = tf.transpose(a=X, perm=[0, 2, 1])

  # shape = (cur_batch_size * num_feat, seq_len)
  X_feat = tf.reshape(
      tensor=X_transposed,
      shape=[cur_batch_size * params["num_feat"], params["seq_len"]])
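
  # As a concrete example (illustrative numbers only): with cur_batch_size=32,
  # seq_len=30, and num_feat=5, X has shape (32, 30, 5); X_time then has shape
  # (960, 5), one row per timestep, and X_feat has shape (160, 30), one row
  # per feature's time series.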

  ##############################################################################

  # Variables for calculating error distribution statistics: running count,
  # mean, and covariance accumulators, plus eigendecomposition and
  # number-of-principal-components variables for both the time major and
  # feature major views
  (pca_time_count_var,
   pca_time_mean_var,
   pca_time_cov_var,
   pca_time_eigval_var,
   pca_time_eigvec_var,
   pca_time_k_pc_var,
   pca_feat_count_var,
   pca_feat_mean_var,
   pca_feat_cov_var,
   pca_feat_eigval_var,
   pca_feat_eigvec_var,
   pca_feat_k_pc_var) = create_both_pca_vars(
      params["seq_len"], params["num_feat"])

  # 3. Loss function, training/eval ops
  if (mode == tf.estimator.ModeKeys.TRAIN and
      params["training_mode"] == "reconstruction"):
    if not params["autotune_principal_components"]:
      with tf.variable_scope(name_or_scope="pca_vars", reuse=tf.AUTO_REUSE):
        # Check if the batch is a singleton; this matters for the covariance
        # math below

        # Time based
        # shape = ()
        singleton_condition = tf.equal(
            x=cur_batch_size * params["seq_len"], y=1)

        pca_time_cov_var, pca_time_mean_var, pca_time_count_var = tf.cond(
            pred=singleton_condition,
            true_fn=lambda: singleton_batch_cov_variable_updating(
                params["seq_len"],
                X_time,
                pca_time_count_var,
                pca_time_mean_var,
                pca_time_cov_var),
            false_fn=lambda: non_singleton_batch_cov_variable_updating(
                cur_batch_size,
                params["seq_len"],
                X_time,
                pca_time_count_var,
                pca_time_mean_var,
                pca_time_cov_var))
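
        # The two helpers above presumably fold the batch's sample mean and
        # covariance into the running accumulators with the standard
        # two-sample merge (a sketch of the math, not the helpers' code):
        #   n' = n + m
        #   mean' = (n * mean + m * mean_b) / n'
        #   cov' = (n * cov + m * cov_b + (n * m / n') * d @ d.T) / n'
        # where d = mean_b - mean and m is the number of rows in the batch
        # (population form; the helpers may divide by n' - 1 instead).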

        # shape = (num_feat,) & (num_feat, num_feat)
        pca_time_eigval_tensor, pca_time_eigvec_tensor = tf.linalg.eigh(
            tensor=pca_time_cov_var)

        if params["k_principal_components_time"] is not None:
          pca_time_k_pc = set_k_principal_components(
              params["k_principal_components_time"], pca_time_k_pc_var)
        else:
          pca_time_k_pc = tf.zeros(shape=(), dtype=tf.float64)

        # Features based
        # shape = ()
        singleton_features_condition = tf.equal(
            x=cur_batch_size * params["num_feat"], y=1)

        pca_feat_cov_var, pca_feat_mean_var, pca_feat_count_var = tf.cond(
            pred=singleton_features_condition,
            true_fn=lambda: singleton_batch_cov_variable_updating(
                params["num_feat"],
                X_feat,
                pca_feat_count_var, pca_feat_mean_var,
                pca_feat_cov_var),
            false_fn=lambda: non_singleton_batch_cov_variable_updating(
                cur_batch_size,
                params["num_feat"],
                X_feat,
                pca_feat_count_var,
                pca_feat_mean_var,
                pca_feat_cov_var))

        # shape = (seq_len,) & (seq_len, seq_len)
        pca_feat_eigval_tensor, pca_feat_eigvec_tensor = tf.linalg.eigh(
            tensor=pca_feat_cov_var)

        if params["k_principal_components_feat"] is not None:
          pca_feat_k_pc = set_k_principal_components(
              params["k_principal_components_feat"], pca_feat_k_pc_var)
        else:
          pca_feat_k_pc = tf.zeros(shape=(), dtype=tf.float64)

      # Lastly, use control dependencies around the loss to enforce that the
      # PCA variables are assigned; the control order matters, hence the
      # separate contexts
      with tf.control_dependencies(
          control_inputs=[pca_time_cov_var, pca_feat_cov_var]):
        with tf.control_dependencies(
            control_inputs=[pca_time_mean_var, pca_feat_mean_var]):
          with tf.control_dependencies(
              control_inputs=[pca_time_count_var, pca_feat_count_var]):
            with tf.control_dependencies(
                control_inputs=[tf.assign(ref=pca_time_eigval_var,
                                          value=pca_time_eigval_tensor),
                                tf.assign(ref=pca_time_eigvec_var,
                                          value=pca_time_eigvec_tensor),
                                tf.assign(ref=pca_feat_eigval_var,
                                          value=pca_feat_eigval_tensor),
                                tf.assign(ref=pca_feat_eigvec_var,
                                          value=pca_feat_eigvec_tensor),
                                pca_time_k_pc,
                                pca_feat_k_pc]):
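              # The loss is identically zero; multiplying by dummy_var just
              # gives the optimizer a gradient path back into the graph, so
              # this "training" step only runs the statistics updates enforced
              # by the control dependencies above.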
              loss = tf.reduce_sum(
                  input_tensor=tf.zeros(
                      shape=(), dtype=tf.float64) * dummy_var)

              train_op = tf.contrib.layers.optimize_loss(
                  loss=loss,
                  global_step=tf.train.get_global_step(),
                  learning_rate=params["learning_rate"],
                  optimizer="SGD")

              return loss, train_op, None, None, None, None
    else:
      # Time based
      if params["k_principal_components_time"] is None:
        # shape = (cur_batch_size * seq_len, num_feat)
        X_time_cen = X_time - pca_time_mean_var

        # shape = (num_feat - 1,)
        X_time_recon_mse = tf.map_fn(
            fn=lambda x: pca_reconstruction_k_pc_mse(
                X_time_cen, pca_time_eigvec_var, x),
            elems=tf.range(start=1,
                           limit=params["num_feat"],
                           dtype=tf.int64),
            dtype=tf.float64)

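        # X_time_recon_mse[i] holds the reconstruction MSE when keeping i + 1
        # principal components; the helper below presumably picks the k that
        # minimizes it and stores the choice in pca_time_k_pc_var.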
        pca_time_k_pc = find_best_k_principal_components(
            X_time_recon_mse, pca_time_k_pc_var)
      else:
        pca_time_k_pc = set_k_principal_components(
            params["k_principal_components_time"], pca_time_k_pc_var)

      if params["k_principal_components_feat"] is None:
        # Features based
        # shape = (cur_batch_size * num_feat, seq_len)
        X_feat_cen = X_feat - pca_feat_mean_var

        # shape = (seq_len - 1,)
        X_feat_recon_mse = tf.map_fn(
            fn=lambda x: pca_reconstruction_k_pc_mse(
                X_feat_cen, pca_feat_eigvec_var, x),
            elems=tf.range(start=1,
                           limit=params["seq_len"],
                           dtype=tf.int64),
            dtype=tf.float64)

        pca_feat_k_pc = find_best_k_principal_components(
            X_feat_recon_mse, pca_feat_k_pc_var)
      else:
        pca_feat_k_pc = set_k_principal_components(
            params["k_principal_components_feat"], pca_feat_k_pc_var)

      with tf.control_dependencies(
          control_inputs=[pca_time_k_pc, pca_feat_k_pc]):
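        # Same zero-loss trick as above: only the k assignments matter here.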
        loss = tf.reduce_sum(
            input_tensor=tf.zeros(
                shape=(), dtype=tf.float64) * dummy_var)

        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.train.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="SGD")

        return loss, train_op, None, None, None, None

  else:
    # Time based
    # shape = (cur_batch_size * seq_len, num_feat)
    X_time_cen = X_time - pca_time_mean_var

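    # Use the autotuned k stored in its variable when no explicit
    # k_principal_components_time was provided in params; the feature-based
    # reconstruction below follows the same pattern.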
    # shape = (cur_batch_size * seq_len, num_feat)
    if params["k_principal_components_time"] is None:
      X_time_recon = pca_reconstruction_k_pc(
          X_time_cen,
          pca_time_eigvec_var,
          pca_time_k_pc_var)
    else:
      X_time_recon = pca_reconstruction_k_pc(
          X_time_cen,
          pca_time_eigvec_var,
          params["k_principal_components_time"])

    # Features based
    # shape = (cur_batch_size * num_feat, seq_len)
    X_feat_cen = X_feat - pca_feat_mean_var

    # shape = (cur_batch_size * num_feat, seq_len)
    if params["k_principal_components_feat"] is None:
      X_feat_recon = pca_reconstruction_k_pc(
          X_feat_cen,
          pca_feat_eigvec_var,
          pca_feat_k_pc_var)
    else:
      X_feat_recon = pca_reconstruction_k_pc(
          X_feat_cen,
          pca_feat_eigvec_var,
          params["k_principal_components_feat"])

    return None, None, X_time_cen, X_time_recon, X_feat_cen, X_feat_recon
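

# For intuition, a minimal sketch of the top-k reconstruction that
# pca_reconstruction_k_pc appears to perform, inferred from the call sites
# above (an assumption, not the module's actual helper). tf.linalg.eigh
# returns eigenvalues in ascending order, so the k largest principal
# components are the last k columns of the eigenvector matrix.
def _pca_reconstruction_k_pc_sketch(X_cen, eigvec, k):
  """Reconstructs mean-centered data from its top-k principal components.

  Args:
    X_cen: tf.float64 tensor of mean-centered data, shape (n, d).
    eigvec: tf.float64 tensor with eigenvectors in columns, shape (d, d), in
      ascending eigenvalue order as returned by tf.linalg.eigh.
    k: Python int, number of principal components to keep, 1 <= k <= d.

  Returns:
    tf.float64 tensor of the rank-k reconstruction, shape (n, d).
  """
  # Keep the eigenvectors of the k largest eigenvalues, shape = (d, k)
  top_k_eigvec = eigvec[:, -k:]

  # Project the centered data onto the top-k components, shape = (n, k)
  scores = tf.matmul(a=X_cen, b=top_k_eigvec)

  # Map the projections back to the original space, shape = (n, d)
  return tf.matmul(a=scores, b=top_k_eigvec, transpose_b=True)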