static int y4m_input_open_impl()

in libvmaf/tools/y4m_input.c [561:705]


static int y4m_input_open_impl(y4m_input *_y4m,FILE *_fin){
  char buffer[Y4M_HEADER_BUFSIZE];
  int  ret;
  int  i;
  int  xstride;
  /*Read until newline, or Y4M_HEADER_BUFSIZE-1 bytes, whichever comes
     first.*/
  for(i=0;i<Y4M_HEADER_BUFSIZE-1;i++){
    ret=fread(buffer+i,1,1,_fin);
    if(ret<1)return -1;
    if(buffer[i]=='\n')break;
  }
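  /*The byte-at-a-time read above leaves the stream positioned at the first
     byte past the header's newline, i.e. at the start of the first FRAME
     marker.*/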
  buffer[i]='\0';
  if(i<9||memcmp(buffer,"YUV4MPEG",8)){
    fprintf(stderr,"Incomplete magic for YUV4MPEG file.\n");
    return -1;
  }
  if(buffer[8]!='2'){
    fprintf(stderr,"Incorrect YUV input file version; YUV4MPEG2 required.\n");
    /*Warn only: keep going in case the rest of the header still parses.*/
  }
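  /*Parsing starts at buffer+5, inside the magic string; the tag parser is
     expected to skip the unrecognized "PEG2" token before the real
     W/H/F/I/A/C tags.*/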
  ret=y4m_parse_tags(_y4m,buffer+5);
  if(ret<0){
    fprintf(stderr,"Error parsing YUV4MPEG2 header.\n");
    return ret;
  }
  if(_y4m->interlace=='?'){
    fprintf(stderr,"Warning: Input video interlacing format unknown; "
     "assuming progressive scan.\n");
  }
  else if(_y4m->interlace!='p'){
    fprintf(stderr,"Input video is interlaced; "
     "Theora only handles progressive scan.\n");
    return -1;
  }
  _y4m->depth=8;
  if(strcmp(_y4m->chroma_type,"420")==0||
   strcmp(_y4m->chroma_type,"420jpeg")==0||
   strcmp(_y4m->chroma_type,"420mpeg2")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h
     +2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
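    /*For example, at 1920x1080 this reads 1920*1080+2*960*540=3110400 bytes
       per frame: one full-resolution luma plane plus two half-by-half
       chroma planes.*/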
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_null;
  }
  else if(strcmp(_y4m->chroma_type,"420p10")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=(_y4m->pic_w*_y4m->pic_h
                           +2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2))*2;
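    /*Same plane layout as 8-bit 4:2:0, but every sample occupies two bytes,
       so the 1920x1080 example above doubles to 6220800 bytes.*/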
    _y4m->depth=10;
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_null;
  }
  else if (strcmp(_y4m->chroma_type,"422p10")==0) {
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
    _y4m->src_c_dec_v=_y4m->dst_c_dec_v=1;
    _y4m->depth = 10;
    _y4m->dst_buf_read_sz = 2*(_y4m->pic_w*_y4m->pic_h
		    +2*((_y4m->pic_w+1)/2)*_y4m->pic_h);
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
  }
  else if(strcmp(_y4m->chroma_type,"444p10")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=1;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h*3*2;
    _y4m->depth=10;
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_null;
  }
  else if(strcmp(_y4m->chroma_type,"420paldv")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_sz=3*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
    _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
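    /*Each chroma plane is ((w+1)/2)*((h+1)/2) bytes: two of them are read
       from the stream, and the third half-plane is scratch space for the
       second filter pass.*/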
    _y4m->convert=y4m_convert_42xpaldv_42xjpeg;
  }
  else if(strcmp(_y4m->chroma_type,"422")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
    _y4m->src_c_dec_v=_y4m->dst_c_dec_v=1;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
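    /*4:2:2 chroma planes are half-width but full-height: at 1920x1080 each
       is 960*1080 bytes, so aux_buf_read_sz is 2*960*1080=2073600 bytes.*/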
    _y4m->convert=y4m_convert_42xmpeg2_42xjpeg;
  }
  else if(strcmp(_y4m->chroma_type,"411")==0){
    _y4m->src_c_dec_h=4;
    /*We don't want to introduce any additional sub-sampling, so we
       promote 4:1:1 material to 4:2:2, the closest supported format.*/
    _y4m->dst_c_dec_h=2;
    _y4m->src_c_dec_v=_y4m->dst_c_dec_v=1;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=2*((_y4m->pic_w+3)/4)*_y4m->pic_h;
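    /*((w+3)/4) rounds the quarter-resolution chroma width up: at 1920x1080
       each 4:1:1 chroma plane is 480*1080 bytes, 1036800 bytes for the
       pair.*/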
    _y4m->convert=y4m_convert_411_422jpeg;
  }
  else if(strcmp(_y4m->chroma_type,"444")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=1;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h*3;
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_null;
  }
  else if(strcmp(_y4m->chroma_type,"444alpha")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=1;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h*3;
    /*Read the extra alpha plane into the aux buf.
      It will be discarded.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
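    /*The alpha plane is the same size as the luma plane; it is read into
       the aux buffer purely so the stream stays in sync, then ignored.*/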
    _y4m->convert=y4m_convert_null;
  }
  else if(strcmp(_y4m->chroma_type,"mono")==0){
    _y4m->src_c_dec_h=_y4m->src_c_dec_v=0;
    _y4m->dst_c_dec_h=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*No extra space required, but we need to clear the chroma planes.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_mono_420jpeg;
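    /*y4m_convert_mono_420jpeg is expected to fill both chroma planes with
       the neutral (mid-scale) value, producing valid 4:2:0 output.*/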
  }
  else{
    fprintf(stderr,"Unknown chroma sampling type: %s\n",_y4m->chroma_type);
    return -1;
  }
  xstride = (_y4m->depth>8)?2:1;
  /*The size of the final frame buffers is always computed from the
     destination chroma decimation type.*/
  _y4m->dst_buf_sz=_y4m->pic_w*_y4m->pic_h
   +2*((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
   ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
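  /*E.g. for 8-bit 4:2:0 at 1920x1080 this reproduces the 3110400-byte
     figure above; the xstride factor below doubles it for depths over
     8 bits.*/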
  _y4m->dst_buf_sz*=xstride;
  /*Scale the picture size up to a multiple of 16.*/
  _y4m->frame_w = (_y4m->pic_w + 15) & ~0xF;
  _y4m->frame_h = (_y4m->pic_h + 15) & ~0xF;
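  /*E.g. a 1918x1080 picture gets a 1920x1088 frame: 1080 is not a multiple
     of 16, so it rounds up to 1088.*/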
  /*Force the offsets to be even so that chroma samples line up like we
     expect.*/
  _y4m->pic_x=(_y4m->frame_w-_y4m->pic_w)>>1&~1;
  _y4m->pic_y=(_y4m->frame_h-_y4m->pic_h)>>1&~1;
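  /*Continuing the example: pic_x=(1920-1918)>>1&~1=0 and
     pic_y=(1088-1080)>>1&~1=4; masking with ~1 keeps the offsets even so
     subsampled chroma stays aligned with luma.*/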
  _y4m->dst_buf=(unsigned char *)malloc(_y4m->dst_buf_sz);
  if(_y4m->dst_buf==NULL)return -1;
  _y4m->aux_buf=_y4m->aux_buf_sz?(unsigned char *)malloc(_y4m->aux_buf_sz):NULL;
  if(_y4m->aux_buf_sz>0&&_y4m->aux_buf==NULL){
    free(_y4m->dst_buf);
    _y4m->dst_buf=NULL;
    return -1;
  }
  return 0;
}
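
A minimal usage sketch follows. It is hypothetical: open_clip_example is not
part of the file, and because y4m_input_open_impl is static it would have to
live in y4m_input.c itself. Only the signature and the pic_w/pic_h/depth
fields visible above are relied on.

static int open_clip_example(const char *_path,y4m_input *_y4m){
  FILE *fin;
  fin=fopen(_path,"rb");
  if(fin==NULL)return -1;
  /*Parses the stream header and allocates the frame buffers.*/
  if(y4m_input_open_impl(_y4m,fin)<0){
    fclose(fin);
    return -1;
  }
  /*On success the stream is positioned at the first frame; keep fin open
     for subsequent reads.*/
  fprintf(stderr,"%dx%d, %d-bit input\n",
   _y4m->pic_w,_y4m->pic_h,_y4m->depth);
  return 0;
}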