public IActionResult EvaluateImage(string imageUrl)

in src/AIHub/Controllers/ContentSafetyController.cs [105:161]


    public IActionResult EvaluateImage(string imageUrl)
    {
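        // Analyzes a remote image with Azure AI Content Safety and rejects it when
        // any category severity exceeds the thresholds configured on the model.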
        model.Approve = true;

        // Content Safety client authenticated with the resource endpoint and subscription key.
        ContentSafetyClient client = new ContentSafetyClient(new Uri(endpoint), new AzureKeyCredential(subscriptionKey));

        // Wrap the image URL in the payload expected by the AnalyzeImage operation.
        ContentSafetyImageData image = new ContentSafetyImageData(new Uri(imageUrl));

        var request = new AnalyzeImageOptions(image);

        Response<AnalyzeImageResult> response;
        try
        {
            response = client.AnalyzeImage(request);

            // Look up each category's severity once; a missing category counts as severity 0.
            var analysis = response.Value.CategoriesAnalysis;
            int hate = analysis.FirstOrDefault(a => a.Category == ImageCategory.Hate)?.Severity ?? 0;
            int selfHarm = analysis.FirstOrDefault(a => a.Category == ImageCategory.SelfHarm)?.Severity ?? 0;
            int sexual = analysis.FirstOrDefault(a => a.Category == ImageCategory.Sexual)?.Severity ?? 0;
            int violence = analysis.FirstOrDefault(a => a.Category == ImageCategory.Violence)?.Severity ?? 0;

            // Reject the image when any category exceeds its configured threshold.
            if (hate > model.Hate || selfHarm > model.SelfHarm ||
                sexual > model.Sexual || violence > model.Violence)
            {
                model.Approve = false;
            }

            // Build the moderation summary once and expose it on both the model and the view.
            string message = "Moderation result: \n" +
                    (model.Approve.Value ? "APPROVED" : "REJECTED") + "\n" +
                    "Hate severity: " + hate + "\n" +
                    "SelfHarm severity: " + selfHarm + "\n" +
                    "Sexual severity: " + sexual + "\n" +
                    "Violence severity: " + violence;

            model.Message = message;
            ViewBag.Message = message;

            // Append the SAS token query string so the view can load the protected blob.
            model.Image = imageUrl + sasUri.Query;
            ViewBag.Image = imageUrl + sasUri.Query;

        }
        catch (RequestFailedException ex)
        {
            // Log the failure and surface a generic error message to the view.
            _logger.LogError(ex, "An error occurred while evaluating the content.");
            ViewBag.Message = "An error occurred while evaluating the content. Please try again later.";
            return View("ImageModerator", model);
        }

        return Ok(model);
    }
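
For reference, the same AnalyzeImage flow can be exercised outside the controller. The sketch below is a minimal console program under stated assumptions: the endpoint, key, and image URL are placeholder values, and a single `threshold` stands in for the per-category thresholds the controller reads from its model.

    using System;
    using System.Linq;
    using Azure;
    using Azure.AI.ContentSafety;

    class ImageModerationSketch
    {
        static void Main()
        {
            // Placeholder values; the controller reads these from configuration.
            var endpoint = new Uri("https://my-resource.cognitiveservices.azure.com/");
            var credential = new AzureKeyCredential("my-subscription-key");
            var imageUrl = new Uri("https://example.com/sample.jpg");

            var client = new ContentSafetyClient(endpoint, credential);
            var request = new AnalyzeImageOptions(new ContentSafetyImageData(imageUrl));

            Response<AnalyzeImageResult> response = client.AnalyzeImage(request);

            // Check every category against one illustrative threshold; the controller
            // instead compares each category against its own model-defined threshold.
            const int threshold = 2;
            var categories = new[]
            {
                ImageCategory.Hate, ImageCategory.SelfHarm,
                ImageCategory.Sexual, ImageCategory.Violence
            };
            bool approve = categories.All(c =>
                (response.Value.CategoriesAnalysis
                    .FirstOrDefault(a => a.Category == c)?.Severity ?? 0) <= threshold);

            Console.WriteLine(approve ? "APPROVED" : "REJECTED");
            foreach (var a in response.Value.CategoriesAnalysis)
            {
                Console.WriteLine($"{a.Category} severity: {a.Severity}");
            }
        }
    }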