in Extended/libwebp/src/enc/picture_csp_enc.c [445:568]
static int PreprocessARGB(const uint8_t* r_ptr,
                          const uint8_t* g_ptr,
                          const uint8_t* b_ptr,
                          int step, int rgb_stride,
                          WebPPicture* const picture) {
  // we expand the right/bottom border if needed
  const int w = (picture->width + 1) & ~1;
  const int h = (picture->height + 1) & ~1;
  const int uv_w = w >> 1;
  const int uv_h = h >> 1;
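  // (chroma is stored at half resolution horizontally and vertically: 4:2:0)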
  uint64_t prev_diff_y_sum = ~0;
  int j, iter;
  // TODO(skal): allocate one big memory chunk. But for now, it's easier
  // for valgrind debugging to have several chunks.
  fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t);   // scratch
  fixed_y_t* const best_y_base = SAFE_ALLOC(w, h, fixed_y_t);
  fixed_y_t* const target_y_base = SAFE_ALLOC(w, h, fixed_y_t);
  fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t);
  fixed_t* const best_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
  fixed_t* const target_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
  fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t);
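  // 'target_*' hold the values to match, 'best_*' the current solution, and
  // 'best_rgb_*' the luma/chroma implied by that solution on each iteration.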
  fixed_y_t* best_y = best_y_base;
  fixed_y_t* target_y = target_y_base;
  fixed_t* best_uv = best_uv_base;
  fixed_t* target_uv = target_uv_base;
  const uint64_t diff_y_threshold = (uint64_t)(3.0 * w * h);
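  // (the iteration below stops early once the accumulated luma change drops
  // below roughly 3 units per pixel; see the exit test at the end of the loop)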
  int ok;

  if (best_y_base == NULL || best_uv_base == NULL ||
      target_y_base == NULL || target_uv_base == NULL ||
      best_rgb_y == NULL || best_rgb_uv == NULL ||
      tmp_buffer == NULL) {
    ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
    goto End;
  }
  assert(picture->width >= kMinDimensionIterativeConversion);
  assert(picture->height >= kMinDimensionIterativeConversion);

  WebPInitConvertARGBToYUV();

  // Import RGB samples to W/RGB representation.
  for (j = 0; j < picture->height; j += 2) {
    const int is_last_row = (j == picture->height - 1);
    fixed_y_t* const src1 = tmp_buffer + 0 * w;
    fixed_y_t* const src2 = tmp_buffer + 3 * w;
    // prepare two rows of input
    ImportOneRow(r_ptr, g_ptr, b_ptr, step, picture->width, src1);
    if (!is_last_row) {
      ImportOneRow(r_ptr + rgb_stride, g_ptr + rgb_stride, b_ptr + rgb_stride,
                   step, picture->width, src2);
    } else {
      memcpy(src2, src1, 3 * w * sizeof(*src2));
    }
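    // best_y starts as a plain gray (luma) approximation of the two rows;
    // target_y / target_uv hold the values the iterations below try to match,
    // and best_uv starts out equal to target_uv.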
    StoreGray(src1, best_y + 0, w);
    StoreGray(src2, best_y + w, w);

    UpdateW(src1, target_y, w);
    UpdateW(src2, target_y + w, w);
    UpdateChroma(src1, src2, target_uv, uv_w);
    memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv));
    best_y += 2 * w;
    best_uv += 3 * uv_w;
    target_y += 2 * w;
    target_uv += 3 * uv_w;
    r_ptr += 2 * rgb_stride;
    g_ptr += 2 * rgb_stride;
    b_ptr += 2 * rgb_stride;
  }

  // Iterate and resolve clipping conflicts.
  for (iter = 0; iter < kNumIterations; ++iter) {
    const fixed_t* cur_uv = best_uv_base;
    const fixed_t* prev_uv = best_uv_base;
    uint64_t diff_y_sum = 0;

    best_y = best_y_base;
    best_uv = best_uv_base;
    target_y = target_y_base;
    target_uv = target_uv_base;
    for (j = 0; j < h; j += 2) {
      fixed_y_t* const src1 = tmp_buffer + 0 * w;
      fixed_y_t* const src2 = tmp_buffer + 3 * w;
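      // Reconstruct two full-resolution rows from the current best luma and
      // the neighbouring chroma rows (prev/cur/next), up-sampled on the fly.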
      {
        const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 3 * uv_w : 0);
        InterpolateTwoRows(best_y, prev_uv, cur_uv, next_uv, w, src1, src2);
        prev_uv = cur_uv;
        cur_uv = next_uv;
      }
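      // Convert the reconstruction back to luma/chroma so it can be compared
      // against the targets.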
      UpdateW(src1, best_rgb_y + 0 * w, w);
      UpdateW(src2, best_rgb_y + 1 * w, w);
      UpdateChroma(src1, src2, best_rgb_uv, uv_w);

      // update two rows of Y and one row of RGB
      diff_y_sum += WebPSharpYUVUpdateY(target_y, best_rgb_y, best_y, 2 * w);
      WebPSharpYUVUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w);

      best_y += 2 * w;
      best_uv += 3 * uv_w;
      target_y += 2 * w;
      target_uv += 3 * uv_w;
    }
    // test exit condition: stop once the luma error is small enough, or as
    // soon as it stops decreasing
    if (iter > 0) {
      if (diff_y_sum < diff_y_threshold) break;
      if (diff_y_sum > prev_diff_y_sum) break;
    }
    prev_diff_y_sum = diff_y_sum;
  }

  // final reconstruction
  ok = ConvertWRGBToYUV(best_y_base, best_uv_base, picture);

 End:
  WebPSafeFree(best_y_base);
  WebPSafeFree(best_uv_base);
  WebPSafeFree(target_y_base);
  WebPSafeFree(target_uv_base);
  WebPSafeFree(best_rgb_y);
  WebPSafeFree(best_rgb_uv);
  WebPSafeFree(tmp_buffer);
  return ok;
}