def __init__()

in torchbenchmark/models/nvidia_deeprecommender/nvinfer.py [0:0]
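
Constructs the inference benchmark: it parses the benchmark (or command-line) arguments, optionally loads the Netflix training and eval data, builds the DeepRecommender AutoEncoder, restores saved weights if a checkpoint exists at the save path, and puts the model into eval mode on the requested device.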


  def __init__(self, device='cpu', jit=False, batch_size=256, usecommandlineargs=False):

    # toy test mode runs on a random input batch instead of the Netflix dataset
    self.toytest = True

    self.batch_size = batch_size

    # number of movies in the Netflix training set
    self.node_count = 197951

    if self.toytest:
      # random batch standing in for real rating vectors
      self.toyinputs = torch.randn(self.batch_size, self.node_count).to(device)

    if usecommandlineargs:
      self.args = getCommandLineArgs()
    else:
      if device == "cpu":
        forcecuda = False
      elif device == "cuda":
        forcecuda = True
      else:
        # unknown device string, quit init
        return

      self.args = getBenchmarkArgs(forcecuda)

    # validate and normalize the parsed argument namespace
    args = processArgState(self.args)

    self.params = dict()
    self.params['batch_size'] = self.args.batch_size
    self.params['data_dir'] =  self.args.path_to_train_data
    self.params['major'] = 'users'
    self.params['itemIdInd'] = 1
    self.params['userIdInd'] = 0
    if not self.args.silent:
      print("Loading training data")
    
    if not self.toytest:
      self.data_layer = input_layer.UserItemRecDataProvider(params=self.params)
    
      if not self.args.silent:
        print("Data loaded")
        print("Total items found: {}".format(len(self.data_layer.data.keys())))
        print("Vector dim: {}".format(self.data_layer.vector_dim))
  
        print("Loading eval data")

    self.eval_params = copy.deepcopy(self.params)
    # must set eval batch size to 1 to make sure no examples are missed
    self.eval_params['batch_size'] = 1
    self.eval_params['data_dir'] = self.args.path_to_eval_data
  
    if self.toytest:
      # with toy inputs the encoder input width is node_count rather than the
      # data layer's vector_dim used in the else branch below
      self.rencoder = model.AutoEncoder(layer_sizes=[self.node_count] + [int(l) for l in self.args.hidden_layers.split(',')],
                                        nl_type=self.args.non_linearity_type,
                                        is_constrained=self.args.constrained,
                                        dp_drop_prob=self.args.drop_prob,
                                        last_layer_activations=not self.args.skip_last_layer_nl)
    else:
      self.eval_data_layer = input_layer.UserItemRecDataProvider(params=self.eval_params,
                                                                 user_id_map=self.data_layer.userIdMap,
                                                                 item_id_map=self.data_layer.itemIdMap)
      self.rencoder = model.AutoEncoder(layer_sizes=[self.data_layer.vector_dim] + [int(l) for l in self.args.hidden_layers.split(',')],
                                        nl_type=self.args.non_linearity_type,
                                        is_constrained=self.args.constrained,
                                        dp_drop_prob=self.args.drop_prob,
                                        last_layer_activations=not self.args.skip_last_layer_nl)
  
    self.path_to_model = Path(self.args.save_path)
    if self.path_to_model.is_file():
      print("Loading model from: {}".format(self.path_to_model))
      self.rencoder.load_state_dict(torch.load(self.args.save_path))
  
    if not self.args.silent:
      print('######################################################')
      print('######################################################')
      print('############# AutoEncoder Model: #####################')
      print(self.rencoder)
      print('######################################################')
      print('######################################################')

    self.rencoder.eval()

    if self.args.use_cuda:
      self.rencoder = self.rencoder.cuda()

    if not self.toytest:
      # inverse maps translate internal indices back to the original user/item ids
      self.inv_userIdMap = {v: k for k, v in self.data_layer.userIdMap.items()}
      self.inv_itemIdMap = {v: k for k, v in self.data_layer.itemIdMap.items()}
  
      self.eval_data_layer.src_data = self.data_layer.data
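
A minimal usage sketch follows. It assumes the enclosing class is named DeepRecommenderInferenceBenchmark (the class name is not shown in this excerpt) and relies on the default toy-test path, so no Netflix data files are required.

  # hypothetical usage; DeepRecommenderInferenceBenchmark is an assumed class name,
  # since the enclosing class is not shown in this excerpt
  import torch
  from torchbenchmark.models.nvidia_deeprecommender.nvinfer import DeepRecommenderInferenceBenchmark

  device = 'cuda' if torch.cuda.is_available() else 'cpu'
  bench = DeepRecommenderInferenceBenchmark(device=device, batch_size=256)

  # toytest is True by default, so toyinputs holds a random (batch_size, node_count) batch
  with torch.no_grad():
    out = bench.rencoder(bench.toyinputs)
  print(out.shape)  # expected: torch.Size([256, 197951])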