in evaluation/eval_concap_retrieval.py [0:0]
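# main() parses the CLI arguments below, builds a BertForMultiModalPreTraining model
# (optionally loading --pretrained_weight), runs caption/image retrieval evaluation on the
# --val_file split, and writes Recall@K plus median/mean rank under <output_dir>/<save_name>.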
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--features_h5path", default="data/coco/coco_trainval.h5")
# Required parameters
parser.add_argument(
"--val_file",
default="data/cocoRetreival/all_data_final_val_set0_2014.jsonline",
type=str,
help="The input train corpus.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--pretrained_weight",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--output_dir",
default="result",
type=str,
# required=True,
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--config_file",
default="config/bert_config.json",
type=str,
# required=True,
help="The config file which specified the model details.",
)
## Other parameters
parser.add_argument(
"--max_seq_length",
default=30,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--train_batch_size",
default=128,
type=int,
help="Total batch size for training.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=50,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--warmup_proportion",
default=0.01,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
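# Caution: argparse's type=bool converts any non-empty string to True, so passing
# "--do_lower_case False" still yields True; keep the default for uncased BERT models.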
parser.add_argument(
"--do_lower_case",
default=True,
type=bool,
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit float precision instead of 32-bit",
)
parser.add_argument(
"--loss_scale",
type=float,
default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n",
)
parser.add_argument(
"--num_workers",
type=int,
default=20,
help="Number of workers in the dataloader.",
)
parser.add_argument(
"--from_pretrained",
action="store_true",
help="Wheter the tensor is from pretrained.",
)
parser.add_argument(
"--save_name", default="", type=str, help="save name for training."
)
parser.add_argument(
"--baseline",
action="store_true",
help="Wheter to use the baseline model (single bert).",
)
parser.add_argument(
"--zero_shot", action="store_true", help="Wheter directly evaluate."
)
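# Example invocation (a sketch; the paths are the argparse defaults above and the
# checkpoint path is a placeholder -- adjust to your setup):
#   python evaluation/eval_concap_retrieval.py \
#       --config_file config/bert_config.json \
#       --from_pretrained --pretrained_weight <path/to/checkpoint.bin> \
#       --output_dir result --save_name my_eval_run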
args = parser.parse_args()
print(args)
if args.save_name != "":
timeStamp = args.save_name
else:
timeStamp = strftime("%d-%b-%y-%X-%a", gmtime())
timeStamp += "_{:0>6d}".format(random.randint(0, int(10e6)))
savePath = os.path.join(args.output_dir, timeStamp)
if not os.path.exists(savePath):
os.makedirs(savePath)
config = BertConfig.from_json_file(args.config_file)
# save all the hidden parameters.
with open(os.path.join(savePath, "command.txt"), "w") as f:
print(args, file=f) # Python 3.x
print("\n", file=f)
print(config, file=f)
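# Device / distributed setup: with local_rank == -1 (or --no_cuda) run on a single node,
# letting DataParallel cover multiple GPUs; otherwise each process owns one GPU and the
# NCCL backend handles synchronization.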
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
logger.info(
"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16
)
)
if args.gradient_accumulation_steps < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps
)
)
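# Per-step batch size handed to the dataloader: the requested total divided by the number
# of gradient-accumulation steps (an inherited training knob; the default of 1 leaves it unchanged).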
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
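# Seed Python, NumPy, and (all) CUDA RNGs so the retrieval numbers are reproducible.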
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
# raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# train_examples = None
num_train_optimization_steps = None
print("Loading Train Dataset", args.val_file)
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case
)
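# Precomputed image region features are read from the H5 file, while ConceptCapLoaderRetrieval
# yields the caption/image retrieval batches consumed by evaluate().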
image_features_reader = ImageFeaturesH5Reader(args.features_h5path, True)
eval_dataloader = ConceptCapLoaderRetrieval(
None, tokenizer, seq_len=args.max_seq_length, batch_size=args.train_batch_size
)
config.fast_mode = True
if args.from_pretrained:
model = BertForMultiModalPreTraining.from_pretrained(
args.pretrained_weight, config
)
else:
model = BertForMultiModalPreTraining.from_pretrained(args.bert_model, config)
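# Optionally cast the model to fp16 and wrap it for distributed (Apex DDP) or
# single-node multi-GPU (torch.nn.DataParallel) execution before moving it to the GPU.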
if args.fp16:
model.half()
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
)
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
model.cuda()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(eval_dataloader))
logger.info(" Batch size = %d", args.train_batch_size)
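# evaluate() (defined elsewhere in this file) is assumed to return Recall@1/5/10 together
# with the median and mean rank of the correct match.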
r1, r5, r10, medr, meanr = evaluate(args, model, eval_dataloader)
print("finish evaluation, save result to %s")
val_name = args.val_file.split("/")[-1]
with open(os.path.join(savePath, val_name + "_result.txt"), "w") as f:
print(
"r1:%.3f, r5:%.3f, r10:%.3f, mder:%.3f, meanr:%.3f"
% (r1, r5, r10, medr, meanr),
file=f,
)