in src/AIHub/Controllers/ContentSafetyController.cs [164:224]
public IActionResult EvaluateText()
{
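    // Validate that every threshold field was supplied and that there is text to analyze.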
    if (CheckNullValues(HttpContext))
    {
        ViewBag.Message = "You must enter a value for each threshold";
        return View("TextModerator", model);
    }
    if (string.IsNullOrEmpty(HttpContext.Request.Form["text"]))
    {
        ViewBag.Message = "You must enter text to evaluate";
        return View("TextModerator", model);
    }
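    // Read the per-category thresholds and the text to evaluate from the submitted form.
    // Note: model.Severity holds the threshold applied to the Sexual category below.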
    model.Severity = Convert.ToInt32(HttpContext.Request.Form["severitytext"]);
    model.Violence = Convert.ToInt32(HttpContext.Request.Form["violencetext"]);
    model.SelfHarm = Convert.ToInt32(HttpContext.Request.Form["shtext"]);
    model.Hate = Convert.ToInt32(HttpContext.Request.Form["hatetext"]);
    model.Text = HttpContext.Request.Form["text"];
    model.Approve = true;
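    // Create an Azure AI Content Safety client and analyze the submitted text.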
    ContentSafetyClient client = new ContentSafetyClient(new Uri(endpoint), new AzureKeyCredential(subscriptionKey));
    var request = new AnalyzeTextOptions(model.Text);
    Response<AnalyzeTextResult> response;
    try
    {
        response = client.AnalyzeText(request);
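        // Reject the content if any category's severity exceeds the threshold configured for it.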
        if (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.Hate)?.Severity > model.Hate)
        {
            model.Approve = false;
        }
        if (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.SelfHarm)?.Severity > model.SelfHarm)
        {
            model.Approve = false;
        }
        if (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.Sexual)?.Severity > model.Severity)
        {
            model.Approve = false;
        }
        if (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.Violence)?.Severity > model.Violence)
        {
            model.Approve = false;
        }
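        // Build the moderation summary (verdict plus per-category severities) shown in the view.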
ViewBag.Message = "Resultado de la moderación: \n" +
(model.Approve.Value ? "APROBADO" : "RECHAZADO") + "\n" +
"Hate severity: " + (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.Hate)?.Severity ?? 0) + "\n" +
"SelfHarm severity: " + (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.SelfHarm)?.Severity ?? 0) + "\n" +
"Sexual severity: " + (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.Sexual)?.Severity ?? 0) + "\n" +
"Violence severity: " + (response.Value.CategoriesAnalysis.FirstOrDefault(a => a.Category == TextCategory.Violence)?.Severity ?? 0);
}
    catch (RequestFailedException ex)
    {
        // The exception is passed to the logger, so its message does not need to be concatenated into the log text.
        _logger.LogError(ex, "An error occurred while evaluating the content.");
        ViewBag.Message = "An error occurred while evaluating the content. Please try again later.";
        return View("TextModerator", model);
    }
    return View("TextModerator", model);
}