evaluations/safety_eval.py [125:133]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            adversarial_eval_result = evaluate(
                evaluation_name=f"{prefix} Adversarial Tests",
                data=adversarial_conversation_result,
                evaluators={
                    "sexual": sexual_evaluator,
                    "self_harm": self_harm_evaluator,
                    "hate_unfairness": hate_unfairness_evaluator,
                    "violence": violence_evaluator
                },
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



evaluations/safety_eval.py [139:147]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            adversarial_eval_w_jailbreak_result = evaluate(
                evaluation_name=f"{prefix} Adversarial Tests w/ Jailbreak", 
                data=adversarial_conversation_result_w_jailbreak,
                evaluators={
                    "sexual": sexual_evaluator,
                    "self_harm": self_harm_evaluator,
                    "hate_unfairness": hate_unfairness_evaluator,
                    "violence": violence_evaluator
                },
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
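
Both excerpts pass the same four content-safety evaluators to evaluate(); those objects are constructed earlier in safety_eval.py, outside the excerpted ranges. For context, here is a minimal sketch of how such evaluators are typically set up with the azure-ai-evaluation package, assuming a Microsoft Entra credential and an Azure AI project scope. The placeholder project values and the exact construction shown are assumptions, not code from this repo.

# A minimal sketch (assumed, not from safety_eval.py): constructing the four
# content-safety evaluators referenced in the excerpts above, using the
# azure-ai-evaluation package. The project values are placeholders.
from azure.identity import DefaultAzureCredential
from azure.ai.evaluation import (
    SexualEvaluator,
    SelfHarmEvaluator,
    HateUnfairnessEvaluator,
    ViolenceEvaluator,
)

# Scope of the Azure AI project that hosts the safety-evaluation service.
azure_ai_project = {
    "subscription_id": "<subscription-id>",
    "resource_group_name": "<resource-group>",
    "project_name": "<project-name>",
}
credential = DefaultAzureCredential()

# Each evaluator scores one harm category over the conversation dataset
# produced by the adversarial simulator.
sexual_evaluator = SexualEvaluator(
    credential=credential, azure_ai_project=azure_ai_project
)
self_harm_evaluator = SelfHarmEvaluator(
    credential=credential, azure_ai_project=azure_ai_project
)
hate_unfairness_evaluator = HateUnfairnessEvaluator(
    credential=credential, azure_ai_project=azure_ai_project
)
violence_evaluator = ViolenceEvaluator(
    credential=credential, azure_ai_project=azure_ai_project
)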



