in hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java [310:450]
/**
 * Executes a single OM metadata operation identified by {@code operation}
 * for the given sequence counter. When {@code mixedOperation} is enabled,
 * each worker thread instead runs the operation assigned to its thread
 * sequence id; when {@code randomOp} is enabled, the counter is replaced
 * by a uniformly random value in [0, testNo). Every call is timed under
 * the operation's name in the freon metrics registry.
 *
 * @param counter sequence number used to derive the key/file path
 * @throws Exception if the timed client call fails (e.g. IOException from
 *                   the Ozone Manager, or NoSuchFileException when a
 *                   listing returns fewer entries than a full batch)
 */
private void applyOperation(long counter) throws Exception {
  final OmKeyArgs keyArgs;
  final String keyName;
  if (mixedOperation) {
    // Pin each thread to one operation from the configured mix.
    // NOTE(review): this assigns a shared field from multiple worker
    // threads; confirm 'operation' is per-thread or safely published.
    operation = operations[(int) getThreadSequenceId()];
  }
  if (randomOp) {
    // Replace the sequential counter with a random key index so reads
    // hit the key space uniformly instead of in order.
    counter = ThreadLocalRandom.current().nextLong(getTestNo());
  }
  switch (operation) {
  case CREATE_KEY:
    keyName = getPath(counter);
    getMetrics().timer(operation.name()).time(() -> {
      try (OutputStream stream = bucket.createKey(keyName, dataSize)) {
        contentGenerator.write(stream);
      }
      return null;
    });
    break;
  case LOOKUP_KEY:
    keyName = getPath(counter);
    keyArgs = omKeyArgsBuilder.get().setKeyName(keyName).build();
    getMetrics().timer(operation.name()).time(() -> {
      ozoneManagerClient.lookupKey(keyArgs);
      return null;
    });
    break;
  case GET_KEYINFO:
    keyName = getPath(counter);
    keyArgs = omKeyArgsBuilder.get().setKeyName(keyName).build();
    getMetrics().timer(operation.name()).time(() -> {
      ozoneManagerClient.getKeyInfo(keyArgs, false);
      return null;
    });
    break;
  case HEAD_KEY:
    // Same RPC as GET_KEYINFO but with the head-op flag, so the OM can
    // skip block location details.
    keyName = getPath(counter);
    keyArgs = omKeyArgsBuilder.get()
        .setKeyName(keyName).setHeadOp(true).build();
    getMetrics().timer(operation.name()).time(() -> {
      ozoneManagerClient.getKeyInfo(keyArgs, false);
      return null;
    });
    break;
  case READ_KEY:
    keyName = getPath(counter);
    getMetrics().timer(operation.name()).time(() -> {
      // Drain the full stream; the data itself is discarded.
      try (OzoneInputStream stream = bucket.readKey(keyName)) {
        while ((stream.read(readBuffer)) >= 0) {
        }
      }
      return null;
    });
    break;
  case READ_FILE:
    keyName = getPath(counter);
    getMetrics().timer(operation.name()).time(() -> {
      // Drain the full stream; the data itself is discarded.
      try (OzoneInputStream stream = bucket.readFile(keyName)) {
        while ((stream.read(readBuffer)) >= 0) {
        }
      }
      return null;
    });
    break;
  case CREATE_FILE:
    keyName = getPath(counter);
    getMetrics().timer(operation.name()).time(() -> {
      try (OutputStream stream = bucket.createFile(
          keyName, dataSize, replicationConfig, true, false)) {
        contentGenerator.write(stream);
      }
      return null;
    });
    break;
  case LOOKUP_FILE:
    keyName = getPath(counter);
    keyArgs = omKeyArgsBuilder.get().setKeyName(keyName).build();
    getMetrics().timer(operation.name()).time(() -> {
      ozoneManagerClient.lookupFile(keyArgs);
      return null;
    });
    break;
  case LIST_KEYS: {
    // Each thread lists its own disjoint slice of the key space.
    final String startKeyName = startKeyForThread();
    getMetrics().timer(operation.name()).time(() -> {
      List<OmKeyInfo> keyInfoList = ozoneManagerClient.listKeys(
          volumeName, bucketName, startKeyName, "", batchSize);
      validateFullBatch(keyInfoList.size());
      return null;
    });
    break;
  }
  case LIST_STATUS: {
    // Each thread lists its own disjoint slice of the key space.
    final String startKeyName = startKeyForThread();
    final OmKeyArgs listArgs = omKeyArgsBuilder.get().setKeyName("").build();
    getMetrics().timer(operation.name()).time(() -> {
      List<OzoneFileStatus> fileStatusList = ozoneManagerClient.listStatus(
          listArgs, false, startKeyName, batchSize);
      validateFullBatch(fileStatusList.size());
      return null;
    });
    break;
  }
  case INFO_BUCKET:
    // Use the Callable overload like the other cases so the checked
    // IOException propagates via 'throws Exception' instead of being
    // wrapped in a RuntimeException.
    getMetrics().timer(operation.name()).time(() -> {
      ozoneManagerClient.getBucketInfo(volumeName, bucketName);
      return null;
    });
    break;
  case INFO_VOLUME:
    getMetrics().timer(operation.name()).time(() -> {
      ozoneManagerClient.getVolumeInfo(volumeName);
      return null;
    });
    break;
  default:
    throw new IllegalStateException("Unrecognized write command " +
        "type request " + operation);
  }
}

/**
 * Returns the listing start key for the current thread: each thread scans
 * its own {@code batchSize}-wide, non-overlapping window of keys.
 */
private String startKeyForThread() {
  return getPath(getThreadSequenceId() * batchSize);
}

/**
 * Fails the run when a listing returned fewer entries than a full batch,
 * meaning the bucket was not pre-populated with enough keys.
 *
 * @param returnedCount number of entries the listing returned; the start
 *                      key itself is excluded from the result, hence +1
 * @throws NoSuchFileException if fewer than {@code batchSize} keys exist
 *                             in this thread's window
 */
private void validateFullBatch(int returnedCount) throws NoSuchFileException {
  if (returnedCount + 1 < batchSize) {
    throw new NoSuchFileException(
        "There are not enough files for testing you should use "
        + "CREATE_FILE to create at least batch-size * threads = "
        + batchSize * getThreadNo());
  }
}