export async function readTextureToTexelViews()

in src/webgpu/shader/execution/expression/call/builtin/texture_utils.ts [2916:3174]


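/**
 * Reads back the contents of a texture as TexelViews, one per mip level, by running a
 * compute shader that textureLoads every texel (and every sample, for multisampled
 * textures) into a storage buffer, which is then copied to a mappable buffer and read
 * on the CPU.
 */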
export async function readTextureToTexelViews(
  t: GPUTest,
  texture: GPUTexture,
  descriptor: Omit<GPUTextureDescriptor, 'format' | 'usage'>,
  format: EncodableTextureFormat
) {
  const device = t.device;
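  // Look up (or create) this device's cache of readback pipelines, keyed below by
  // format, view dimension, and sample count.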
  const viewDimensionToPipelineMap =
    s_readTextureToRGBA32DeviceToPipeline.get(device) ??
    new Map<string, GPUComputePipeline>();
  s_readTextureToRGBA32DeviceToPipeline.set(device, viewDimensionToPipelineMap);

  const { componentType, resultType } = getTextureFormatTypeInfo(texture.format);
  const viewDimension = getEffectiveViewDimension(t, descriptor);
  const id = `${texture.format}:${viewDimension}:${texture.sampleCount}`;
  let pipeline = viewDimensionToPipelineMap.get(id);
  if (!pipeline) {
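    // Pick the WGSL texture type, load expression, and size query that match this
    // view dimension and sample count.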
    let textureWGSL;
    let loadWGSL;
    let dimensionWGSL = 'textureDimensions(tex, 0)';
    switch (viewDimension) {
      case '2d':
        if (texture.sampleCount > 1) {
          textureWGSL = `texture_multisampled_2d<${componentType}>`;
          loadWGSL = 'textureLoad(tex, coord.xy, sampleIndex)';
          dimensionWGSL = 'textureDimensions(tex)';
        } else {
          textureWGSL = `texture_2d<${componentType}>`;
          loadWGSL = 'textureLoad(tex, coord.xy, 0)';
        }
        break;
      case 'cube-array': // cube-array doesn't exist in compat so we can just use 2d_array for this
      case '2d-array':
        textureWGSL = `texture_2d_array<${componentType}>`;
        loadWGSL = `
          textureLoad(
              tex,
              coord.xy,
              coord.z,
              0)`;
        break;
      case '3d':
        textureWGSL = `texture_3d<${componentType}>`;
        loadWGSL = 'textureLoad(tex, coord.xyz, 0)';
        break;
      case 'cube':
        textureWGSL = `texture_cube<${componentType}>`;
        loadWGSL = 'textureLoadCubeAs2DArray(tex, coord.xy, coord.z)';
        break;
      case '1d':
        textureWGSL = `texture_1d<${componentType}>`;
        loadWGSL = `textureLoad(tex, coord.x, 0)`;
        dimensionWGSL = `vec2u(textureDimensions(tex), 1)`;
        break;
      default:
        unreachable(`unsupported view: ${viewDimension}`);
    }

    const textureLoadCubeWGSL = `
      const faceMat = array(
        mat3x3f( 0,  0,  -2,  0, -2,   0,  1,  1,   1),   // pos-x
        mat3x3f( 0,  0,   2,  0, -2,   0, -1,  1,  -1),   // neg-x
        mat3x3f( 2,  0,   0,  0,  0,   2, -1,  1,  -1),   // pos-y
        mat3x3f( 2,  0,   0,  0,  0,  -2, -1, -1,   1),   // neg-y
        mat3x3f( 2,  0,   0,  0, -2,   0, -1,  1,   1),   // pos-z
        mat3x3f(-2,  0,   0,  0, -2,   0,  1,  1,  -1));  // neg-z

      // needed for compat mode.
      fn textureLoadCubeAs2DArray(tex: texture_cube<${componentType}>, coord: vec2u, layer: u32) -> ${resultType} {
        // convert the texel coord to a normalized coord
        let size = textureDimensions(tex, 0);

        // Offset by 0.75 instead of the more common 0.5 when converting from texel to normalized
        // texture coordinates because we're using textureGather. 0.5 would be the exact center of a
        // texel, and precision issues could make the gather pick texels in any direction from that
        // center. Off center, it gathers in a predictable direction.
        let uv = (vec2f(coord) + 0.75) / vec2f(size.xy);

        // convert uv + layer into cube coord
        let cubeCoord = faceMat[layer] * vec3f(uv, 1.0);

        // We have to use textureGather as it's the only texture builtin that works on cubemaps
        // with integer texture formats.
        let r = textureGather(0, tex, smp, cubeCoord);
        let g = textureGather(1, tex, smp, cubeCoord);
        let b = textureGather(2, tex, smp, cubeCoord);
        let a = textureGather(3, tex, smp, cubeCoord);

        // element 3 is the texel corresponding to cubeCoord
        return ${resultType}(r[3], g[3], b[3], a[3]);
      }
    `;

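    // The compute module loads one texel (or one sample of one texel) per invocation
    // and writes it as a vec4 into the storage buffer.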
    const module = device.createShaderModule({
      code: `
        ${isViewDimensionCubeOrCubeArray(viewDimension) ? textureLoadCubeWGSL : ''}
        struct Uniforms {
          sampleCount: u32,
        };

        @group(0) @binding(0) var<uniform> uni: Uniforms;
        @group(0) @binding(1) var tex: ${textureWGSL};
        @group(0) @binding(2) var smp: sampler;
        @group(0) @binding(3) var<storage, read_write> data: array<${resultType}>;

        @compute @workgroup_size(1) fn cs(
          @builtin(global_invocation_id) global_invocation_id : vec3<u32>) {
          _ = smp;
          let size = ${dimensionWGSL};
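          // global_invocation_id.x packs both the texel x and the sample index:
          // x = coord.x * sampleCount + sampleIndex.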
          let ndx = global_invocation_id.z * size.x * size.y * uni.sampleCount +
                    global_invocation_id.y * size.x * uni.sampleCount +
                    global_invocation_id.x;
          let coord = vec3u(global_invocation_id.x / uni.sampleCount, global_invocation_id.yz);
          let sampleIndex = global_invocation_id.x % uni.sampleCount;
          data[ndx] = ${loadWGSL};
        }
      `,
    });
    const type = getTextureFormatType(texture.format);
    const sampleType = isDepthTextureFormat(texture.format)
      ? 'unfilterable-float' // depth supports only 'unfilterable-float' when not used as a comparison.
      : isStencilTextureFormat(texture.format)
      ? 'uint'
      : type === 'float'
      ? 'unfilterable-float'
      : type;
    const bindGroupLayout = device.createBindGroupLayout({
      entries: [
        {
          binding: 0,
          visibility: GPUShaderStage.COMPUTE,
          buffer: {
            type: 'uniform',
          },
        },
        {
          binding: 1,
          visibility: GPUShaderStage.COMPUTE,
          texture: {
            sampleType,
            viewDimension,
            multisampled: texture.sampleCount > 1,
          },
        },
        {
          binding: 2,
          visibility: GPUShaderStage.COMPUTE,
          sampler: {
            type: 'non-filtering',
          },
        },
        {
          binding: 3,
          visibility: GPUShaderStage.COMPUTE,
          buffer: {
            type: 'storage',
          },
        },
      ],
    });
    const layout = device.createPipelineLayout({
      bindGroupLayouts: [bindGroupLayout],
    });
    pipeline = device.createComputePipeline({ layout, compute: { module } });
    viewDimensionToPipelineMap.set(id, pipeline);
  }

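  // Encode one compute pass per mip level. Each pass fills a storage buffer, which is
  // then copied into a mappable buffer for readback after the submit below.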
  const encoder = device.createCommandEncoder({ label: 'readTextureToTexelViews' });

  const readBuffers = [];
  for (let mipLevel = 0; mipLevel < texture.mipLevelCount; ++mipLevel) {
    const size = virtualMipSize(texture.dimension, texture, mipLevel);

    const uniformValues = new Uint32Array([texture.sampleCount, 0, 0, 0]); // min size is 16 bytes
    const uniformBuffer = t.createBufferTracked({
      label: 'readTextureToTexelViews:uniformBuffer',
      size: uniformValues.byteLength,
      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
    });
    device.queue.writeBuffer(uniformBuffer, 0, uniformValues);

    const storageBuffer = t.createBufferTracked({
      label: 'readTextureToTexelViews:storageBuffer',
      size: size[0] * size[1] * size[2] * 4 * 4 * texture.sampleCount, // 4 components * 4 bytes (rgba32) per sample
      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
    });

    const readBuffer = t.createBufferTracked({
      label: 'readTextureToTexelViews:readBuffer',
      size: storageBuffer.size,
      usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
    });
    readBuffers.push({ size, readBuffer });

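    // Default (non-filtering) sampler. Only the cube-view path actually samples with it
    // (via textureGather); the other paths just keep the binding alive with `_ = smp`.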
    const sampler = device.createSampler();

    const aspect = getAspectForTexture(texture);
    const bindGroup = device.createBindGroup({
      layout: pipeline.getBindGroupLayout(0),
      entries: [
        { binding: 0, resource: { buffer: uniformBuffer } },
        {
          binding: 1,
          resource: texture.createView({
            dimension: viewDimension,
            aspect,
            baseMipLevel: mipLevel,
            mipLevelCount: 1,
          }),
        },
        { binding: 2, resource: sampler },
        { binding: 3, resource: { buffer: storageBuffer } },
      ],
    });

    const pass = encoder.beginComputePass();
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    pass.dispatchWorkgroups(size[0] * texture.sampleCount, size[1], size[2]);
    pass.end();
    encoder.copyBufferToBuffer(storageBuffer, 0, readBuffer, 0, readBuffer.size);
  }

  device.queue.submit([encoder.finish()]);

  const texelViews: TexelView[] = [];

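  // Map each readback buffer and wrap its contents in a TexelView indexed by
  // (x, y, z, sampleIndex).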
  for (const { readBuffer, size } of readBuffers) {
    await readBuffer.mapAsync(GPUMapMode.READ);

    // Need a copy of the data since unmapping detaches the ArrayBuffer backing the typed array view.
    const Ctor =
      componentType === 'i32' ? Int32Array : componentType === 'u32' ? Uint32Array : Float32Array;
    const data = new Ctor(readBuffer.getMappedRange()).slice();
    readBuffer.unmap();

    const { sampleCount } = texture;
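    // The offset math mirrors `ndx` in the compute shader: 4 components per sample,
    // with samples interleaved per texel.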
    texelViews.push(
      TexelView.fromTexelsAsColors(format, coord => {
        const offset =
          ((coord.z * size[0] * size[1] + coord.y * size[0] + coord.x) * sampleCount +
            (coord.sampleIndex ?? 0)) *
          4;
        return convertResultFormatToTexelViewFormat(
          {
            R: data[offset + 0],
            G: data[offset + 1],
            B: data[offset + 2],
            A: data[offset + 3],
          },
          format
        );
      })
    );
  }

  return texelViews;
}
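
// Hypothetical usage sketch (not part of the original file): reading back a small 2D
// texture inside a GPUTest. `t` and `myTexture` are placeholder names, and the
// descriptor must match how the texture was created (minus `format` and `usage`).
//
//   const descriptor: Omit<GPUTextureDescriptor, 'format' | 'usage'> = {
//     size: [8, 8, 1],
//     dimension: '2d',
//     mipLevelCount: 1,
//   };
//   const texelViews = await readTextureToTexelViews(t, myTexture, descriptor, 'rgba8unorm');
//   // texelViews[i] holds the texels of mip level i.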