reagent/reporting/discrete_crr_reporter.py [79:106]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                            ("reward_loss", "reward_loss"),
                            ("logged_propensities", "propensities/logged"),
                            ("logged_rewards", "reward/logged"),
                        ]
                    ],
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardActionHistogramAndMeanAggregator(
                                key, category, title, actions
                            ),
                        )
                        for key, category, title in [
                            ("model_propensities", "propensities", "model"),
                            ("model_rewards", "reward", "model"),
                            ("model_values", "value", "model"),
                        ]
                    ],
                )
            },
        }
        super().__init__(self.value_list_observers, self.aggregating_observers)
        self.target_action_distribution = target_action_distribution
        self.recent_window_size = recent_window_size

    # TODO: write this for OSS
    def generate_training_report(self) -> DQNTrainingReport:
        return DQNTrainingReport()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



reagent/reporting/discrete_dqn_reporter.py [79:106]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                            ("reward_loss", "reward_loss"),
                            ("logged_propensities", "propensities/logged"),
                            ("logged_rewards", "reward/logged"),
                        ]
                    ],
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardActionHistogramAndMeanAggregator(
                                key, category, title, actions
                            ),
                        )
                        for key, category, title in [
                            ("model_propensities", "propensities", "model"),
                            ("model_rewards", "reward", "model"),
                            ("model_values", "value", "model"),
                        ]
                    ],
                )
            },
        }
        super().__init__(self.value_list_observers, self.aggregating_observers)
        self.target_action_distribution = target_action_distribution
        self.recent_window_size = recent_window_size

    # TODO: write this for OSS
    def generate_training_report(self) -> DQNTrainingReport:
        return DQNTrainingReport()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
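Note: the two fragments above are byte-for-byte identical. Below is a minimal refactoring sketch, not part of either file, showing how the shared tail of the observer construction could be pulled into one helper. The import alias assumes ReAgent's usual `from reagent.core import aggregators as agg`, and the helper name `_action_histogram_aggregators` is hypothetical.

```python
# Hypothetical sketch only -- not code from either reporter file.
# Assumes `agg` is ReAgent's aggregators module, as in the reporters themselves.
from typing import List, Tuple

from reagent.core import aggregators as agg


def _action_histogram_aggregators(
    actions: List[str],
) -> List[Tuple[str, agg.TensorBoardActionHistogramAndMeanAggregator]]:
    """Build the (name, aggregator) pairs duplicated verbatim in the CRR and DQN reporters."""
    return [
        (
            f"{key}_tb",
            agg.TensorBoardActionHistogramAndMeanAggregator(
                key, category, title, actions
            ),
        )
        for key, category, title in [
            ("model_propensities", "propensities", "model"),
            ("model_rewards", "reward", "model"),
            ("model_values", "value", "model"),
        ]
    ]
```

Each reporter could then splice the returned pairs into its `itertools.chain(...)` call instead of repeating the comprehension.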



