def main()

in predict.py [0:0]


def main(params):
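    # Load the prediction configuration and rebuild the Problem definition
    # (input types, tagging scheme, tokenizer, text normalization) from the conf file.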
    conf = ModelConf('predict', params.conf_path, version, params, mode=params.mode)
    problem = Problem('predict', conf.problem_type, conf.input_types, None,
        with_bos_eos=conf.add_start_end_for_seq, tagging_scheme=conf.tagging_scheme, tokenizer=conf.tokenizer,
        remove_stopwords=conf.remove_stopwords, DBC2SBC=conf.DBC2SBC, unicode_fix=conf.unicode_fix)
        
    # The Problem (vocabularies and label mappings) saved during training must exist on disk.
    if os.path.isfile(conf.saved_problem_path):
        problem.load_problem(conf.saved_problem_path)
        logging.info("Problem loaded!")
        logging.debug("Problem loaded from %s" % conf.saved_problem_path)
    else:
        raise Exception("Problem file %s does not exist!" % conf.saved_problem_path)

    # Check that every requested output field's target (the part after '@' in field@target)
    # exists in the label dictionary built from the training data.
    if len(conf.predict_fields_post_check) > 0:
        for field_to_chk in conf.predict_fields_post_check:
            field, target = field_to_chk.split('@')
            if not problem.output_dict.has_cell(target):
                raise Exception("The target %s of %s does not exist in the training data." % (target, field_to_chk))

    # Rebuild the learning machine without re-initializing parameters, then load the trained weights.
    lm = LearningMachine('predict', conf, problem, vocab_info=None, initialize=False, use_gpu=conf.use_gpu)
    lm.load_model(conf.previous_model_path)

    if params.predict_mode == 'batch':
        logging.info('Predicting %s with the model saved at %s' % (conf.predict_data_path, conf.previous_model_path))
        lm.predict(conf.predict_data_path, conf.predict_output_path, conf.predict_file_columns, conf.predict_fields)
        logging.info("Prediction done! The result is saved to %s" % conf.predict_output_path)
    elif params.predict_mode == 'interactive':
        print('='*80)
        task_type = str(ProblemTypes[problem.problem_type]).split('.')[1]
        # The interactive input format is every predict-file column except the target column(s).
        sample_format = list(conf.predict_file_columns.keys())
        target_ = conf.conf['inputs'].get('target', None)
        target_list = list(target_) if target_ else []
        sample_format = [col for col in sample_format if col not in target_list]
        predict_file_columns = {col: index for index, col in enumerate(sample_format)}
        print('Enabling Interactive Inference Mode for %s Task...' % task_type.upper())
        print('%s Task Interactive. The sample format is <%s>' % (task_type.upper(), ', '.join(sample_format)))
        case_cnt = 1
        while True:
            print('Case%d:' % case_cnt)
            sample = []
            for single in sample_format:
                temp_ = input('\t%s: ' % single)
                # Typing 'exit' at any prompt ends the interactive session.
                if temp_.lower() == 'exit':
                    exit(0)
                sample.append(temp_)
            # Join the typed fields into one tab-separated sample.
            sample = '\t'.join(sample)
            result = lm.interactive([sample], predict_file_columns, conf.predict_fields, params.predict_mode)
            print('\tInference result: %s' % result)
            case_cnt += 1
    else:
        raise Exception('Predict mode supports interactive|batch, got %s' % params.predict_mode)
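
Usage sketch (not from the source): main() can be driven by a small argparse wrapper. The flag names below (--conf_path, --mode, --predict_mode) are assumptions inferred from the attributes main() reads off params; they may not match the project's actual CLI.

import argparse

def parse_args():
    # Hypothetical flags; names and defaults are assumptions, not the project's documented CLI.
    parser = argparse.ArgumentParser(description='Run prediction with a trained model')
    parser.add_argument('--conf_path', type=str, required=True, help='path to the model configuration file')
    parser.add_argument('--mode', type=str, default='normal', help='configuration mode passed through to ModelConf')
    parser.add_argument('--predict_mode', type=str, default='batch', choices=['batch', 'interactive'],
                        help='batch: predict a whole file; interactive: type samples on the console')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())

In batch mode the predictions are written to conf.predict_output_path; in interactive mode samples are read from the console until 'exit' is typed.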