in tfjs-core/src/ops/browser.ts [65:181]
function fromPixels_(
pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|
HTMLVideoElement|ImageBitmap,
numChannels = 3): Tensor3D {
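// numChannels selects how many of the RGBA channels to keep per pixel
// (3 = RGB, dropping alpha).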
// Sanity checks.
if (numChannels > 4) {
throw new Error(
'Cannot construct Tensor with more than 4 channels from pixels.');
}
if (pixels == null) {
throw new Error('pixels passed to tf.browser.fromPixels() cannot be null');
}
let isPixelData = false;
let isImageData = false;
let isVideo = false;
let isImage = false;
let isCanvasLike = false;
let isImageBitmap = false;
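// Detect the concrete input type. Each DOM constructor is guarded with a
// typeof check because it may be undefined outside the browser main thread
// (e.g. in a web worker or in Node.js).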
if ((pixels as PixelData).data instanceof Uint8Array) {
isPixelData = true;
} else if (
typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {
isImageData = true;
} else if (
typeof (HTMLVideoElement) !== 'undefined' &&
pixels instanceof HTMLVideoElement) {
isVideo = true;
} else if (
typeof (HTMLImageElement) !== 'undefined' &&
pixels instanceof HTMLImageElement) {
isImage = true;
// tslint:disable-next-line: no-any
} else if ((pixels as any).getContext != null) {
isCanvasLike = true;
} else if (
typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) {
isImageBitmap = true;
} else {
throw new Error(
'pixels passed to tf.browser.fromPixels() must be either an ' +
'HTMLVideoElement, HTMLImageElement, HTMLCanvasElement or ImageData ' +
'in the browser, an OffscreenCanvas or ImageData in a web worker, or a ' +
'{data: Uint8Array, width: number, height: number} object, but was ' +
`${(pixels as {}).constructor.name}`);
}
if (isVideo) {
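// readyState 2 corresponds to HTMLMediaElement.HAVE_CURRENT_DATA, i.e. at
// least one video frame is available to read.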
const HAVE_CURRENT_DATA_READY_STATE = 2;
if ((pixels as HTMLVideoElement).readyState <
HAVE_CURRENT_DATA_READY_STATE) {
throw new Error(
'The video element has not loaded data yet. Please wait for ' +
'`loadeddata` event on the <video> element.');
}
}
// If the current backend has 'FromPixels' registered, it has a more
// efficient way of handling pixel uploads, so we call that.
const kernel = getKernel(FromPixels, ENGINE.backendName);
if (kernel != null) {
const inputs: FromPixelsInputs = {pixels};
const attrs: FromPixelsAttrs = {numChannels};
return ENGINE.runKernel(
FromPixels, inputs as {} as NamedTensorMap,
attrs as {} as NamedAttrMap);
}
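// No backend-specific kernel is registered, so fall back to reading RGBA
// bytes through a 2D canvas context. Video elements report their frame size
// via videoWidth/videoHeight rather than width/height.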
const [width, height] = isVideo ?
[
(pixels as HTMLVideoElement).videoWidth,
(pixels as HTMLVideoElement).videoHeight
] :
[pixels.width, pixels.height];
let vals: Uint8ClampedArray|Uint8Array;
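// Canvas-like inputs (HTMLCanvasElement / OffscreenCanvas) can be read
// directly from their own 2D context; ImageData and PixelData already expose
// the raw bytes.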
if (isCanvasLike) {
vals =
// tslint:disable-next-line:no-any
(pixels as any).getContext('2d').getImageData(0, 0, width, height).data;
} else if (isImageData || isPixelData) {
vals = (pixels as PixelData | ImageData).data;
} else if (isImage || isVideo || isImageBitmap) {
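// Images, videos and ImageBitmaps must first be rasterized. A single 2D
// context (fromPixels2DContext) is created lazily and reused across calls,
// backed by an OffscreenCanvas when no document is available (web worker).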
if (fromPixels2DContext == null) {
if (typeof document === 'undefined') {
if (typeof OffscreenCanvas !== 'undefined' &&
typeof OffscreenCanvasRenderingContext2D !== 'undefined') {
// @ts-ignore
fromPixels2DContext = new OffscreenCanvas(1, 1).getContext('2d');
} else {
throw new Error(
'Cannot parse input in current context. ' +
'Reason: OffscreenCanvas Context2D rendering is not supported.');
}
} else {
fromPixels2DContext = document.createElement('canvas').getContext('2d');
}
}
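// Resizing the shared canvas also clears it; then draw the source at full
// size and read back the RGBA bytes.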
fromPixels2DContext.canvas.width = width;
fromPixels2DContext.canvas.height = height;
fromPixels2DContext.drawImage(
pixels as HTMLVideoElement, 0, 0, width, height);
vals = fromPixels2DContext.getImageData(0, 0, width, height).data;
}
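// Repack the RGBA bytes into numChannels values per pixel. With 4 channels
// the buffer can be copied wholesale; otherwise the trailing channels are
// dropped (e.g. alpha when numChannels is 3).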
let values: Int32Array;
if (numChannels === 4) {
values = new Int32Array(vals);
} else {
const numPixels = width * height;
values = new Int32Array(numPixels * numChannels);
for (let i = 0; i < numPixels; i++) {
for (let channel = 0; channel < numChannels; ++channel) {
values[i * numChannels + channel] = vals[i * 4 + channel];
}
}
}
const outShape: [number, number, number] = [height, width, numChannels];
return tensor3d(values, outShape, 'int32');
}
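// Usage sketch (not from the original excerpt; assumes the public
// tf.browser.fromPixels wrapper derived from fromPixels_, and an illustrative
// 2x2 ImageData input):
//
//   const image = new ImageData(2, 2);            // 2x2 RGBA pixels
//   const rgb = tf.browser.fromPixels(image, 3);  // Tensor3D of shape [2, 2, 3]
//   rgb.print();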