in src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MRCompiler.java [1530:1748]
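/**
 * Compiles a POMergeJoin into the MR plan. The right-hand input is turned into an
 * indexing job (or handled directly when its loader implements IndexableLoadFunc),
 * and the join itself lands in the map plan of the left-hand input's MROper.
 */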
public void visitMergeJoin(POMergeJoin joinOp) throws VisitorException {
try{
if(compiledInputs.length != 2 || joinOp.getInputs().size() != 2){
int errCode=1101;
throw new MRCompilerException("Merge Join must have exactly two inputs. Found : "+compiledInputs.length, errCode);
}
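// joinOp's first input is the left side (curMROp); the other compiled MROper
// becomes the right side, which is where the indexing work happens.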
curMROp = phyToMROpMap.get(joinOp.getInputs().get(0));
MapReduceOper rightMROpr = null;
if(curMROp.equals(compiledInputs[0]))
rightMROpr = compiledInputs[1];
else
rightMROpr = compiledInputs[0];
// We will first operate on the right side, which is the indexer job.
// First yank the plan of the compiled right input and set it as the inner plan of the right operator.
PhysicalPlan rightPipelinePlan;
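// rightPipelinePlan captures whatever sits between the right-side load and the join,
// so it can be replayed inside the join operator. It stays null in the plain
// Load - Join case, where nothing sits between the load and the join.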
if(!rightMROpr.mapDone){
PhysicalPlan rightMapPlan = rightMROpr.mapPlan;
if(rightMapPlan.getRoots().size() != 1){
int errCode = 2171;
String errMsg = "Expected one but found more then one root physical operator in physical plan.";
throw new MRCompilerException(errMsg,errCode,PigException.BUG);
}
PhysicalOperator rightLoader = rightMapPlan.getRoots().get(0);
if(! (rightLoader instanceof POLoad)){
int errCode = 2172;
String errMsg = "Expected physical operator at root to be POLoad. Found : "+rightLoader.getClass().getCanonicalName();
throw new MRCompilerException(errMsg,errCode);
}
if (rightMapPlan.getSuccessors(rightLoader) == null || rightMapPlan.getSuccessors(rightLoader).isEmpty())
// Load - Join case.
rightPipelinePlan = null;
else{ // We got something on the right side. Yank it and set it as the inner plan of the right input.
rightPipelinePlan = rightMapPlan.clone();
PhysicalOperator root = rightPipelinePlan.getRoots().get(0);
rightPipelinePlan.disconnect(root, rightPipelinePlan.getSuccessors(root).get(0));
rightPipelinePlan.remove(root);
rightMapPlan.trimBelow(rightLoader);
}
}
else if(!rightMROpr.reduceDone){
// Indexer must run in the map phase. If we are in reduce, close this MROper and start a new one.
// No need to yank anything in this case, since we are starting a brand new MROper and it will contain nothing.
POStore rightStore = getStore();
FileSpec rightStrFile = getTempFileSpec();
rightStore.setSFile(rightStrFile);
rightMROpr.reducePlan.addAsLeaf(rightStore);
rightMROpr.setReduceDone(true);
rightMROpr = startNew(rightStrFile, rightMROpr);
rightPipelinePlan = null;
}
else{
int errCode = 2022;
String msg = "Both map and reduce phases have been done. This is unexpected while compiling.";
throw new PlanException(msg, errCode, PigException.BUG);
}
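// Hand the captured right-side pipeline to the join operator so it can be run
// over right-side records at join time.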
joinOp.setupRightPipeline(rightPipelinePlan);
rightMROpr.requestedParallelism = 1; // we need exactly one reducer for the indexing job.
// At this point, we must be operating on the map plan of the right input, and it should contain nothing other than a POLoad.
POLoad rightLoader = (POLoad)rightMROpr.mapPlan.getRoots().get(0);
joinOp.setSignature(rightLoader.getSignature());
LoadFunc rightLoadFunc = rightLoader.getLoadFunc();
List<String> udfs = new ArrayList<String>();
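// udfs collects the right loader's FuncSpec so it can be registered on the join's
// MROper at the end of this method (curMROp.UDFs.addAll(udfs)).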
if(IndexableLoadFunc.class.isAssignableFrom(rightLoadFunc.getClass())) {
joinOp.setRightLoaderFuncSpec(rightLoader.getLFile().getFuncSpec());
joinOp.setRightInputFileName(rightLoader.getLFile().getFileName());
udfs.add(rightLoader.getLFile().getFuncSpec().toString());
// we don't need the right MROper since
// the right loader is an IndexableLoadFunc which can handle the index
// itself
MRPlan.remove(rightMROpr);
if(rightMROpr == compiledInputs[0]) {
compiledInputs[0] = null;
} else if(rightMROpr == compiledInputs[1]) {
compiledInputs[1] = null;
}
rightMROpr = null;
// validate that the join keys in merge join are only
// simple column projections or '*' and not expressions - expressions
// cannot be handled when the index is built by the storage layer on the sorted
// data as the sorted data (and corresponding index) is written.
// So merge join is restricted to not have expressions as
// join keys
int numInputs = mPlan.getPredecessors(joinOp).size(); // should be 2
for(int i = 0; i < numInputs; i++) {
List<PhysicalPlan> keyPlans = joinOp.getInnerPlansOf(i);
for (PhysicalPlan keyPlan : keyPlans) {
for(PhysicalOperator op : keyPlan) {
if(!(op instanceof POProject)) {
int errCode = 1106;
String errMsg = "Merge join is possible only for simple column or '*' join keys when using " +
rightLoader.getLFile().getFuncSpec() + " as the loader";
throw new MRCompilerException(errMsg, errCode, PigException.INPUT);
}
}
}
}
} else {
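// The right loader cannot serve an index itself, so a separate single-reducer MR
// job (rightMROpr) will build the index via MergeJoinIndexer and store it in a
// temp file for DefaultIndexableLoader to read at join time.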
LoadFunc loadFunc = rightLoader.getLoadFunc();
//Replacing POLoad with indexer is disabled for 'merge-sparse' joins. While
//this feature would be useful, the current implementation of DefaultIndexableLoader
//is not designed to handle multiple calls to seekNear. Specifically, it rereads the entire index
//for each call. Some refactoring of this class is required - and then the check below could be removed.
if (joinOp.getJoinType() == LOJoin.JOINTYPE.MERGESPARSE) {
int errCode = 1104;
String errMsg = "Right input of merge-join must implement IndexableLoadFunc. " +
"The specified loader " + loadFunc + " doesn't implement it";
throw new MRCompilerException(errMsg,errCode);
}
// Replace POLoad with indexer.
if (! (OrderedLoadFunc.class.isAssignableFrom(loadFunc.getClass()))){
int errCode = 1104;
String errMsg = "Right input of merge-join must implement " +
"OrderedLoadFunc interface. The specified loader "
+ loadFunc + " doesn't implement it";
throw new MRCompilerException(errMsg,errCode);
}
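// Arguments handed to MergeJoinIndexer: the original load func spec, the join's
// right-side key plans, the yanked right pipeline, the loader signature, the
// operator scope, and a boolean flag (see MergeJoinIndexer for its meaning).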
String[] indexerArgs = new String[6];
List<PhysicalPlan> rightInpPlans = joinOp.getInnerPlansOf(1);
FileSpec origRightLoaderFileSpec = rightLoader.getLFile();
indexerArgs[0] = origRightLoaderFileSpec.getFuncSpec().toString();
indexerArgs[1] = ObjectSerializer.serialize((Serializable)rightInpPlans);
indexerArgs[2] = ObjectSerializer.serialize(rightPipelinePlan);
indexerArgs[3] = rightLoader.getSignature();
indexerArgs[4] = rightLoader.getOperatorKey().scope;
indexerArgs[5] = Boolean.toString(true);
FileSpec lFile = new FileSpec(rightLoader.getLFile().getFileName(),new FuncSpec(MergeJoinIndexer.class.getName(), indexerArgs));
rightLoader.setLFile(lFile);
// The loader of this MROper will return a tuple of the form:
// (keyFirst1, keyFirst2, ..., position, splitIndex). See MergeJoinIndexer.
MRUtil.simpleConnectMapToReduce(rightMROpr, scope, nig);
rightMROpr.useTypedComparator(true);
POStore st = getStore();
FileSpec strFile = getTempFileSpec();
st.setSFile(strFile);
rightMROpr.reducePlan.addAsLeaf(st);
rightMROpr.setReduceDone(true);
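// The single-reducer indexing job (requestedParallelism = 1 above) writes the sorted
// index entries to a temp file; DefaultIndexableLoader, configured below, reads that
// file at join time to seek into the right input.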
// set up the DefaultIndexableLoader for the join operator
String[] defaultIndexableLoaderArgs = new String[5];
defaultIndexableLoaderArgs[0] = origRightLoaderFileSpec.getFuncSpec().toString();
defaultIndexableLoaderArgs[1] = strFile.getFileName();
defaultIndexableLoaderArgs[2] = strFile.getFuncSpec().toString();
defaultIndexableLoaderArgs[3] = joinOp.getOperatorKey().scope;
defaultIndexableLoaderArgs[4] = origRightLoaderFileSpec.getFileName();
joinOp.setRightLoaderFuncSpec((new FuncSpec(DefaultIndexableLoader.class.getName(), defaultIndexableLoaderArgs)));
joinOp.setRightInputFileName(origRightLoaderFileSpec.getFileName());
joinOp.setIndexFile(strFile.getFileName());
udfs.add(origRightLoaderFileSpec.getFuncSpec().toString());
}
// We are done with the right side. Let's work on the left now.
// The join will be materialized in curMROp (the left MROper).
if(!curMROp.mapDone) // Life is easy
curMROp.mapPlan.addAsLeaf(joinOp);
else if(!curMROp.reduceDone){ // Map is already done. Close this MROper and start afresh; the map-side join goes into the new map plan.
POStore leftStore = getStore();
FileSpec leftStrFile = getTempFileSpec();
leftStore.setSFile(leftStrFile);
curMROp.reducePlan.addAsLeaf(leftStore);
curMROp.setReduceDone(true);
curMROp = startNew(leftStrFile, curMROp);
curMROp.mapPlan.addAsLeaf(joinOp);
}
else{
int errCode = 2022;
String msg = "Both map and reduce phases have been done. This is unexpected while compiling.";
throw new PlanException(msg, errCode, PigException.BUG);
}
if(rightMROpr != null) {
rightMROpr.markIndexer();
// We want to ensure the indexing job runs prior to the actual join job, so connect them in order.
MRPlan.connect(rightMROpr, curMROp);
}
phyToMROpMap.put(joinOp, curMROp);
// no combining of small splits, as there is currently no way to guarantee the sortedness
// of the combined splits.
curMROp.noCombineSmallSplits();
curMROp.UDFs.addAll(udfs);
}
catch(PlanException e){
int errCode = 2034;
String msg = "Error compiling operator " + joinOp.getClass().getCanonicalName();
throw new MRCompilerException(msg, errCode, PigException.BUG, e);
}
catch (IOException e){
int errCode = 3000;
String errMsg = "IOException caught while compiling POMergeJoin";
throw new MRCompilerException(errMsg, errCode,e);
}
catch(CloneNotSupportedException e){
int errCode = 2127;
String errMsg = "Cloning exception caught while compiling POMergeJoin";
throw new MRCompilerException(errMsg, errCode, PigException.BUG, e);
}
}